id: 7,461,293,048,740

code_before:

/* Map in a shared object's segments from the file.
Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <elf/ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "dynamic-link.h"
#include <stdio-common/_itoa.h>
#include <dl-dst.h>
/* On some systems, no flag bits are given to specify file mapping. */
#ifndef MAP_FILE
# define MAP_FILE 0
#endif
/* The right way to map in the shared library files is MAP_COPY, which
makes a virtual copy of the data at the time of the mmap call; this
guarantees the mapped pages will be consistent even if the file is
overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
get is MAP_PRIVATE, which copies each page when it is modified; this
means if the file is overwritten, we may at some point get some pages
from the new version after starting with pages from the old version. */
#ifndef MAP_COPY
# define MAP_COPY MAP_PRIVATE
#endif
/* Some systems link their relocatable objects at a base address other
than 0. We want to know that base address so that we can subtract it
from the segment addresses during mapping.
This results in a more efficient address space usage. Defaults to
zero for almost all systems. */
#ifndef MAP_BASE_ADDR
# define MAP_BASE_ADDR(l) 0
#endif
#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
# define byteorder ELFDATA2MSB
# define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
# define byteorder ELFDATA2LSB
# define byteorder_name "little-endian"
#else
# error "Unknown BYTE_ORDER " BYTE_ORDER
# define byteorder ELFDATANONE
#endif
#define STRING(x) __STRING (x)
#ifdef MAP_ANON
/* The fd is not examined when using MAP_ANON. */
# define ANONFD -1
#else
int _dl_zerofd = -1;
# define ANONFD _dl_zerofd
#endif
/* Handle situations where we have a preferred location in memory for
the shared objects. */
#ifdef ELF_PREFERRED_ADDRESS_DATA
ELF_PREFERRED_ADDRESS_DATA;
#endif
#ifndef ELF_PREFERRED_ADDRESS
# define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
#endif
#ifndef ELF_FIXED_ADDRESS
# define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
#endif
size_t _dl_pagesize;
extern const char *_dl_platform;
extern size_t _dl_platformlen;
/* This is the decomposed LD_LIBRARY_PATH search path. */
static struct r_search_path_elem **env_path_list;
/* List of the hardware capabilities we might end up using. */
static const struct r_strlenpair *capstr;
static size_t ncapstr;
static size_t max_capstrlen;
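/* Conversion table: an ELF segment's p_flags value (any combination of
PF_R, PF_W and PF_X) is used directly as the index and yields the
corresponding mmap PROT_* protection bits. */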
const unsigned char _dl_pf_to_prot[8] =
{
[0] = PROT_NONE,
[PF_R] = PROT_READ,
[PF_W] = PROT_WRITE,
[PF_R | PF_W] = PROT_READ | PROT_WRITE,
[PF_X] = PROT_EXEC,
[PF_R | PF_X] = PROT_READ | PROT_EXEC,
[PF_W | PF_X] = PROT_WRITE | PROT_EXEC,
[PF_R | PF_W | PF_X] = PROT_READ | PROT_WRITE | PROT_EXEC
};
/* Get the generated information about the trusted directories. */
#include "trusted-dirs.h"
static const char system_dirs[] = SYSTEM_DIRS;
static const size_t system_dirs_len[] =
{
SYSTEM_DIRS_LEN
};
/* This function has no public prototype. */
extern ssize_t __libc_read (int, void *, size_t);
/* Local version of `strdup' function. */
static inline char *
local_strdup (const char *s)
{
size_t len = strlen (s) + 1;
void *new = malloc (len);
if (new == NULL)
return NULL;
return (char *) memcpy (new, s, len);
}
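/* Count the dynamic string tokens ($ORIGIN and $PLATFORM, with or
without the ${...} braces) contained in NAME. IS_PATH is nonzero
when NAME is a ':'-separated search path, in which case a token may
also end right before a ':'. */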
size_t
_dl_dst_count (const char *name, int is_path)
{
size_t cnt = 0;
do
{
size_t len = 1;
/* $ORIGIN is not expanded for SUID/SGID programs. */
if ((((!__libc_enable_secure
&& strncmp (&name[1], "ORIGIN", 6) == 0 && (len = 7) != 0)
|| (strncmp (&name[1], "PLATFORM", 8) == 0 && (len = 9) != 0))
&& (name[len] == '\0' || name[len] == '/'
|| (is_path && name[len] == ':')))
|| (name[1] == '{'
&& ((!__libc_enable_secure
&& strncmp (&name[2], "ORIGIN}", 7) == 0 && (len = 9) != 0)
|| (strncmp (&name[2], "PLATFORM}", 9) == 0
&& (len = 11) != 0))))
++cnt;
name = strchr (name + len, '$');
}
while (name != NULL);
return cnt;
}
char *
_dl_dst_substitute (struct link_map *l, const char *name, char *result,
int is_path)
{
char *last_elem, *wp;
/* Now fill the result path. While copying over the string we keep
track of the start of the last path element. When we come across
a DST we copy over the value or (if the value is not available)
leave the entire path element out. */
last_elem = wp = result;
do
{
if (*name == '$')
{
const char *repl;
size_t len;
if ((((strncmp (&name[1], "ORIGIN", 6) == 0 && (len = 7) != 0)
|| (strncmp (&name[1], "PLATFORM", 8) == 0 && (len = 9) != 0))
&& (name[len] == '\0' || name[len] == '/'
|| (is_path && name[len] == ':')))
|| (name[1] == '{'
&& ((strncmp (&name[2], "ORIGIN}", 7) == 0 && (len = 9) != 0)
|| (strncmp (&name[2], "PLATFORM}", 9) == 0
&& (len = 11) != 0))))
{
repl = ((len == 7 || name[2] == 'O')
? (__libc_enable_secure ? NULL : l->l_origin)
: _dl_platform);
if (repl != NULL && repl != (const char *) -1)
{
wp = __stpcpy (wp, repl);
name += len;
}
else
{
/* We cannot use this path element, the value of the
replacement is unknown. */
wp = last_elem;
name += len;
while (*name != '\0' && (!is_path || *name != ':'))
++name;
}
}
else
/* No DST we recognize. */
*wp++ = *name++;
}
else if (is_path && *name == ':')
{
*wp++ = *name++;
last_elem = wp;
}
else
*wp++ = *name++;
}
while (*name != '\0');
*wp = '\0';
return result;
}
/* Return copy of argument with all recognized dynamic string tokens
($ORIGIN and $PLATFORM for now) replaced. On some platforms it
might not be possible to determine the path from which the object
belonging to the map is loaded. In this case the path element
containing $ORIGIN is left out. */
static char *
expand_dynamic_string_token (struct link_map *l, const char *s)
{
/* We make two runs over the string. First we determine how large the
resulting string is and then we copy it over. Since this is not a
frequently executed operation we are looking here not for performance
but rather for code size. */
size_t cnt;
size_t total;
char *result;
/* Determine the number of DST elements. */
cnt = DL_DST_COUNT (s, 1);
/* If we do not have to replace anything simply copy the string. */
if (cnt == 0)
return local_strdup (s);
/* Determine the length of the substituted string. */
total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
/* Allocate the necessary memory. */
result = (char *) malloc (total + 1);
if (result == NULL)
return NULL;
return DL_DST_SUBSTITUTE (l, s, result, 1);
}
/* Add `name' to the list of names for a particular shared object.
`name' is expected to have been allocated with malloc and will
be freed if the shared object already has this name.
Does nothing if the object already has this name. */
static void
internal_function
add_name_to_object (struct link_map *l, const char *name)
{
struct libname_list *lnp, *lastp;
struct libname_list *newname;
size_t name_len;
lastp = NULL;
for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
if (strcmp (name, lnp->name) == 0)
return;
name_len = strlen (name) + 1;
newname = malloc (sizeof *newname + name_len);
if (newname == NULL)
{
/* No more memory. */
_dl_signal_error (ENOMEM, name, "cannot allocate name record");
return;
}
/* The object should have a libname set from _dl_new_object. */
assert (lastp != NULL);
newname->name = memcpy (newname + 1, name, name_len);
newname->next = NULL;
lastp->next = newname;
}
/* All known directories in sorted order. */
static struct r_search_path_elem *all_dirs;
/* Standard search directories. */
static struct r_search_path_elem **rtld_search_dirs;
static size_t max_dirnamelen;
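/* Split the writable string RPATH at the characters in SEP and append
one r_search_path_elem per directory to RESULT, terminating the array
with a NULL entry. Entries are shared through the global ALL_DIRS
list; if CHECK_TRUSTED is nonzero (SUID/SGID programs) only the
built-in trusted directories are accepted. WHAT and WHERE identify
the source of the path for debugging output. */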
static inline struct r_search_path_elem **
fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
int check_trusted, const char *what, const char *where)
{
char *cp;
size_t nelems = 0;
while ((cp = __strsep (&rpath, sep)) != NULL)
{
struct r_search_path_elem *dirp;
size_t len = strlen (cp);
/* `strsep' can pass an empty string. This has to be
interpreted as `use the current directory'. */
if (len == 0)
{
static const char curwd[] = "./";
cp = (char *) curwd;
}
/* Remove trailing slashes (except for "/"). */
while (len > 1 && cp[len - 1] == '/')
--len;
/* Now add one if there is none so far. */
if (len > 0 && cp[len - 1] != '/')
cp[len++] = '/';
/* Make sure we don't use untrusted directories if we run SUID. */
if (check_trusted)
{
const char *trun = system_dirs;
size_t idx;
/* All trusted directories must be complete names. */
if (cp[0] != '/')
continue;
for (idx = 0;
idx < sizeof (system_dirs_len) / sizeof (system_dirs_len[0]);
++idx)
{
if (len == system_dirs_len[idx] && memcmp (trun, cp, len) == 0)
/* Found it. */
break;
trun += system_dirs_len[idx] + 1;
}
if (idx == sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
/* It's not a trusted directory, skip it. */
continue;
}
/* See if this directory is already known. */
for (dirp = all_dirs; dirp != NULL; dirp = dirp->next)
if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
break;
if (dirp != NULL)
{
/* It is available, see whether it's on our own list. */
size_t cnt;
for (cnt = 0; cnt < nelems; ++cnt)
if (result[cnt] == dirp)
break;
if (cnt == nelems)
result[nelems++] = dirp;
}
else
{
size_t cnt;
/* It's a new directory. Create an entry and add it. */
dirp = (struct r_search_path_elem *)
malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status));
if (dirp == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
dirp->dirname = cp;
dirp->dirnamelen = len;
if (len > max_dirnamelen)
max_dirnamelen = len;
/* We have to make sure all the relative directories are never
ignored. The current directory might change and all our
saved information would be void. */
if (cp[0] != '/')
for (cnt = 0; cnt < ncapstr; ++cnt)
dirp->status[cnt] = existing;
else
for (cnt = 0; cnt < ncapstr; ++cnt)
dirp->status[cnt] = unknown;
dirp->what = what;
dirp->where = where;
dirp->next = all_dirs;
all_dirs = dirp;
/* Put it in the result array. */
result[nelems++] = dirp;
}
}
/* Terminate the array. */
result[nelems] = NULL;
return result;
}
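/* Turn the DT_RPATH/DT_RUNPATH string RPATH of the object L into an
array of search path elements. Dynamic string tokens are expanded
first; objects listed in _dl_inhibit_rpath (honored only for
non-SUID programs) get an empty search list. */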
static struct r_search_path_elem **
internal_function
decompose_rpath (const char *rpath, struct link_map *l, const char *what)
{
/* Make a copy we can work with. */
const char *where = l->l_name;
char *copy;
char *cp;
struct r_search_path_elem **result;
size_t nelems;
/* First see whether we must forget the RUNPATH and RPATH from this
object. */
if (_dl_inhibit_rpath != NULL && !__libc_enable_secure)
{
const char *found = strstr (_dl_inhibit_rpath, where);
if (found != NULL)
{
size_t len = strlen (where);
if ((found == _dl_inhibit_rpath || found[-1] == ':')
&& (found[len] == '\0' || found[len] == ':'))
{
/* This object is on the list of objects for which the
RUNPATH and RPATH must not be used. */
result = (struct r_search_path_elem **)
malloc (sizeof (*result));
if (result == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
result[0] = NULL;
return result;
}
}
}
/* Make a writable copy. At the same time expand possible dynamic
string tokens. */
copy = expand_dynamic_string_token (l, rpath);
if (copy == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create RUNPATH/RPATH copy");
/* Count the number of necessary elements in the result array. */
nelems = 0;
for (cp = copy; *cp != '\0'; ++cp)
if (*cp == ':')
++nelems;
/* Allocate room for the result. NELEMS + 1 is an upper limit for the
number of necessary entries. */
result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
* sizeof (*result));
if (result == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create cache for search path");
return fillin_rpath (copy, result, ":", 0, what, where);
}
void
internal_function
_dl_init_paths (const char *llp)
{
size_t idx;
const char *strp;
struct r_search_path_elem *pelem, **aelem;
size_t round_size;
#ifdef PIC
struct link_map *l;
#endif
/* Fill in the information about the application's RPATH and the
directories addressed by the LD_LIBRARY_PATH environment variable. */
/* Get the capabilities. */
capstr = _dl_important_hwcaps (_dl_platform, _dl_platformlen,
&ncapstr, &max_capstrlen);
/* First set up the rest of the default search directory entries. */
aelem = rtld_search_dirs = (struct r_search_path_elem **)
malloc ((sizeof (system_dirs_len) / sizeof (system_dirs_len[0]) + 1)
* sizeof (struct r_search_path_elem *));
if (rtld_search_dirs == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create search path array");
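/* Every entry needs room for the structure itself plus the NCAPSTR
status elements following it. ROUND_SIZE is that size rounded up and
expressed in multiples of struct r_search_path_elem, so that the
pointer arithmetic `pelem += round_size' below steps from one entry
to the next. */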
round_size = ((2 * sizeof (struct r_search_path_elem) - 1
+ ncapstr * sizeof (enum r_dir_status))
/ sizeof (struct r_search_path_elem));
rtld_search_dirs[0] = (struct r_search_path_elem *)
malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]) - 1)
* round_size * sizeof (struct r_search_path_elem));
if (rtld_search_dirs[0] == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create cache for search path");
pelem = all_dirs = rtld_search_dirs[0];
strp = system_dirs;
idx = 0;
do
{
size_t cnt;
*aelem++ = pelem;
pelem->what = "system search path";
pelem->where = NULL;
pelem->dirname = strp;
pelem->dirnamelen = system_dirs_len[idx];
strp += system_dirs_len[idx] + 1;
if (pelem->dirname[0] != '/')
for (cnt = 0; cnt < ncapstr; ++cnt)
pelem->status[cnt] = existing;
else
for (cnt = 0; cnt < ncapstr; ++cnt)
pelem->status[cnt] = unknown;
pelem->next = (++idx == (sizeof (system_dirs_len)
/ sizeof (system_dirs_len[0]))
? NULL : (pelem + round_size));
pelem += round_size;
}
while (idx < sizeof (system_dirs_len) / sizeof (system_dirs_len[0]));
max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
*aelem = NULL;
#ifdef PIC
/* This points to the map of the main object. */
l = _dl_loaded;
if (l != NULL)
{
assert (l->l_type != lt_loaded);
if (l->l_info[DT_RUNPATH])
{
/* Allocate room for the search path and fill in information
from RUNPATH. */
l->l_runpath_dirs =
decompose_rpath ((const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RUNPATH]->d_un.d_val),
l, "RUNPATH");
/* The RPATH is ignored. */
l->l_rpath_dirs = NULL;
}
else
{
l->l_runpath_dirs = NULL;
if (l->l_info[DT_RPATH])
/* Allocate room for the search path and fill in information
from RPATH. */
l->l_rpath_dirs =
decompose_rpath ((const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RPATH]->d_un.d_val),
l, "RPATH");
else
l->l_rpath_dirs = NULL;
}
}
#endif /* PIC */
if (llp != NULL && *llp != '\0')
{
size_t nllp;
const char *cp = llp;
/* Decompose the LD_LIBRARY_PATH contents. First determine how many
elements it has. */
nllp = 1;
while (*cp)
{
if (*cp == ':' || *cp == ';')
++nllp;
++cp;
}
env_path_list = (struct r_search_path_elem **)
malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
if (env_path_list == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
(void) fillin_rpath (local_strdup (llp), env_path_list, ":;",
__libc_enable_secure, "LD_LIBRARY_PATH", NULL);
}
}
/* Think twice before changing anything in this function. It is placed
here and prepared using the `alloca' magic to prevent it from being
inlined. The function is only called in case of an error. But then
performance does not count. The function used to be "inlinable" and
the compiler did so all the time. This increased the code size for
absolutely no good reason. */
#define LOSE(code, s) lose (code, fd, name, realname, l, s)
static void
__attribute__ ((noreturn))
lose (int code, int fd, const char *name, char *realname, struct link_map *l,
const char *msg)
{
/* The use of `alloca' here looks ridiculous but it helps. The goal
is to prevent the function from being inlined. There is no official
way to do this so we use this trick. gcc never inlines functions
which use `alloca'. */
int *a = alloca (sizeof (int));
a[0] = fd;
(void) __close (a[0]);
if (l != NULL)
{
/* Remove the stillborn object from the list and free it. */
if (l->l_prev)
l->l_prev->l_next = l->l_next;
if (l->l_next)
l->l_next->l_prev = l->l_prev;
free (l);
}
free (realname);
_dl_signal_error (code, name, msg);
}
/* Map in the shared object NAME, actually located in REALNAME, and already
opened on FD. */
#ifndef EXTERNAL_MAP_FROM_FD
static
#endif
struct link_map *
_dl_map_object_from_fd (const char *name, int fd, char *realname,
struct link_map *loader, int l_type)
{
/* This is the expected ELF header. */
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
#ifndef VALID_ELF_HEADER
# define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
# define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
# define VALID_ELF_ABIVERSION(ver) (ver == 0)
#endif
static const unsigned char expected[EI_PAD] =
{
[EI_MAG0] = ELFMAG0,
[EI_MAG1] = ELFMAG1,
[EI_MAG2] = ELFMAG2,
[EI_MAG3] = ELFMAG3,
[EI_CLASS] = ELFW(CLASS),
[EI_DATA] = byteorder,
[EI_VERSION] = EV_CURRENT,
[EI_OSABI] = ELFOSABI_SYSV,
[EI_ABIVERSION] = 0
};
struct link_map *l = NULL;
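/* Map LEN bytes of the file starting at OFFSET to the address MAPSTART
with protection PROT. FIXED is either 0 or MAP_FIXED; MAP_COPY and
MAP_FILE are always added. On failure this bails out through `lose'. */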
inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
int prot, int fixed, off_t offset)
{
caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
fixed|MAP_COPY|MAP_FILE,
fd, offset);
if (mapat == MAP_FAILED)
LOSE (errno, "failed to map segment from shared object");
return mapat;
}
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
int type;
char *readbuf;
ssize_t readlength;
struct stat st;
/* Get file information. */
if (__fxstat (_STAT_VER, fd, &st) < 0)
LOSE (errno, "cannot stat shared object");
/* Look again to see if the real name matched another already loaded. */
for (l = _dl_loaded; l; l = l->l_next)
if (l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it. */
__close (fd);
/* If the name is not in the list of names for this object add
it. */
free (realname);
add_name_to_object (l, name);
++l->l_opencount;
return l;
}
/* Print debugging message. */
if (_dl_debug_files)
_dl_debug_message (1, "file=", name, "; generating link map\n", NULL);
/* Read the header directly. */
readbuf = alloca (_dl_pagesize);
readlength = __libc_read (fd, readbuf, _dl_pagesize);
if (readlength < (ssize_t) sizeof (*header))
LOSE (errno, "cannot read file data");
header = (void *) readbuf;
/* Check the header for basic validity. */
if (__builtin_expect (!VALID_ELF_HEADER (header->e_ident, expected, EI_PAD),
0))
{
/* Something is wrong. */
if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
((ELFMAG0 << (EI_MAG0 * 8)) |
(ELFMAG1 << (EI_MAG1 * 8)) |
(ELFMAG2 << (EI_MAG2 * 8)) |
(ELFMAG3 << (EI_MAG3 * 8)))
#else
((ELFMAG0 << (EI_MAG3 * 8)) |
(ELFMAG1 << (EI_MAG2 * 8)) |
(ELFMAG2 << (EI_MAG1 * 8)) |
(ELFMAG3 << (EI_MAG0 * 8)))
#endif
)
LOSE (0, "invalid ELF header");
if (header->e_ident[EI_CLASS] != ELFW(CLASS))
LOSE (0, "ELF file class not " STRING(__ELF_NATIVE_CLASS) "-bit");
if (header->e_ident[EI_DATA] != byteorder)
LOSE (0, "ELF file data encoding not " byteorder_name);
if (header->e_ident[EI_VERSION] != EV_CURRENT)
LOSE (0, "ELF file version ident not " STRING(EV_CURRENT));
/* XXX We should be able to set system-specific versions which are
allowed here. */
if (!VALID_ELF_OSABI (header->e_ident[EI_OSABI]))
LOSE (0, "ELF file OS ABI invalid.");
if (!VALID_ELF_ABIVERSION (header->e_ident[EI_ABIVERSION]))
LOSE (0, "ELF file ABI version invalid.");
LOSE (0, "internal error");
}
if (__builtin_expect (header->e_version, EV_CURRENT) != EV_CURRENT)
LOSE (0, "ELF file version not " STRING(EV_CURRENT));
if (! __builtin_expect (elf_machine_matches_host (header->e_machine), 1))
LOSE (0, "ELF file machine architecture not " ELF_MACHINE_NAME);
if (__builtin_expect (header->e_phentsize, sizeof (ElfW(Phdr)))
!= sizeof (ElfW(Phdr)))
LOSE (0, "ELF file's phentsize not the expected size");
#ifndef MAP_ANON
# define MAP_ANON 0
if (_dl_zerofd == -1)
{
_dl_zerofd = _dl_sysdep_open_zero_fill ();
if (_dl_zerofd == -1)
{
__close (fd);
_dl_signal_error (errno, NULL, "cannot open zero fill device");
}
}
#endif
/* Enter the new object in the list of loaded objects. */
l = _dl_new_object (realname, name, l_type, loader);
if (__builtin_expect (! l, 0))
LOSE (ENOMEM, "cannot create shared object descriptor");
l->l_opencount = 1;
/* Extract the remaining details we need from the ELF header
and then read in the program header table. */
l->l_entry = header->e_entry;
type = header->e_type;
l->l_phnum = header->e_phnum;
maplength = header->e_phnum * sizeof (ElfW(Phdr));
if (header->e_phoff + maplength <= readlength)
phdr = (void *) (readbuf + header->e_phoff);
else
{
phdr = alloca (maplength);
__lseek (fd, header->e_phoff, SEEK_SET);
if (__libc_read (fd, (void *) phdr, maplength) != maplength)
LOSE (errno, "cannot read file data");
}
{
/* Scan the program header table, collecting its load commands. */
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
/* The struct is initialized to zero so this is not necessary:
l->l_ld = 0;
l->l_phdr = 0;
l->l_addr = 0; */
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
/* These entries tell us where to find things once the file's
segments are mapped in. We record the addresses it says
verbatim, and later correct for the run-time load address. */
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later. */
if (ph->p_align % _dl_pagesize != 0)
LOSE (0, "ELF load command alignment not page-aligned");
if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
LOSE (0, "ELF load command address/offset not properly aligned");
{
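/* MAPSTART and MAPEND are the page-aligned boundaries of the
file-backed part of the segment, DATAEND is where the file data
really ends, ALLOCEND additionally covers the zero-filled (bss)
part described by p_memsz, and MAPOFF is the matching aligned
file offset. */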
struct loadcmd *c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
& ~(_dl_pagesize - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(ph->p_align - 1);
/* Optimize a common case. */
if ((PF_R | PF_W | PF_X) == 7
&& (PROT_READ | PROT_WRITE | PROT_EXEC) == 7)
c->prot = _dl_pf_to_prot[ph->p_flags & (PF_R | PF_W | PF_X)];
else
{
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
}
break;
}
}
/* Now process the load commands and map segments into memory. */
c = loadcmds;
/* Length of the sections to be loaded. */
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
if (type == ET_DYN || type == ET_REL)
{
/* This is a position-independent shared object. We can let the
kernel map it anywhere it likes, but we must have space for all
the segments in their specified positions relative to the first.
So we map the first segment without MAP_FIXED, but with its
extent increased to cover all the segments. Then we remove
access from the excess portion, and there is known to be sufficient
space there to remap the later segments.
As a refinement, sometimes we have an address that we would
prefer to map such objects at; but this is only a preference,
the OS can do whatever it likes. */
caddr_t mapat;
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
- MAP_BASE_ADDR (l));
mapat = map_segment (mappref, maplength, c->prot, 0, c->mapoff);
l->l_addr = (ElfW(Addr)) mapat - c->mapstart;
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated. Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping. */
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].allocend - c->mapend,
0);
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
goto postmap;
}
else
{
/* Notify ELF_PREFERRED_ADDRESS that we have to load this one
fixed. */
ELF_FIXED_ADDRESS (loader, c->mapstart);
}
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart)
/* Map the segment contents from the file. */
map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
c->prot, MAP_FIXED, c->mapoff);
postmap:
if (l->l_phdr == 0
&& c->mapoff <= header->e_phoff
&& (c->mapend - c->mapstart + c->mapoff
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment. */
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file. */
ElfW(Addr) zero, zeroend, zeropage;
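/* ZERO is where the data from the file ends, ZEROEND is where the
allocated segment ends, and ZEROPAGE is the first page boundary at
or above ZERO. The tail of the partial page is cleared by hand;
whole pages beyond it are mapped anonymously. */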
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it. */
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment. */
if ((c->prot & PROT_WRITE) == 0)
{
/* Dag nab it. */
if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
_dl_pagesize, c->prot|PROT_WRITE) < 0)
LOSE (errno, "cannot change memory protections");
}
memset ((void *) zero, 0, zeropage - zero);
if ((c->prot & PROT_WRITE) == 0)
__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
_dl_pagesize, c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD. */
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
ANONFD, 0);
if (mapat == MAP_FAILED)
LOSE (errno, "cannot map zero-fill pages");
}
}
++c;
}
if (l->l_phdr == NULL)
{
/* The program header is not contained in any of the segments.
We have to allocate memory ourselves and copy it over from
our temporary place. */
ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
* sizeof (ElfW(Phdr)));
if (newp == NULL)
LOSE (ENOMEM, "cannot allocate memory for program header");
l->l_phdr = memcpy (newp, phdr,
(header->e_phnum * sizeof (ElfW(Phdr))));
l->l_phdr_allocated = 1;
}
else
/* Adjust the PT_PHDR value by the runtime load address. */
(ElfW(Addr)) l->l_phdr += l->l_addr;
}
/* We are done mapping in the file. We no longer need the descriptor. */
__close (fd);
if (l->l_type == lt_library && type == ET_EXEC)
l->l_type = lt_executable;
if (l->l_ld == 0)
{
if (type == ET_DYN)
LOSE (0, "object file has no dynamic section");
}
else
(ElfW(Addr)) l->l_ld += l->l_addr;
l->l_entry += l->l_addr;
if (_dl_debug_files)
{
const size_t nibbles = sizeof (void *) * 2;
char buf1[nibbles + 1];
char buf2[nibbles + 1];
char buf3[nibbles + 1];
buf1[nibbles] = '\0';
buf2[nibbles] = '\0';
buf3[nibbles] = '\0';
memset (buf1, '0', nibbles);
memset (buf2, '0', nibbles);
memset (buf3, '0', nibbles);
_itoa_word ((unsigned long int) l->l_ld, &buf1[nibbles], 16, 0);
_itoa_word ((unsigned long int) l->l_addr, &buf2[nibbles], 16, 0);
_itoa_word (maplength, &buf3[nibbles], 16, 0);
_dl_debug_message (1, " dynamic: 0x", buf1, " base: 0x", buf2,
" size: 0x", buf3, "\n", NULL);
memset (buf1, '0', nibbles);
memset (buf2, '0', nibbles);
memset (buf3, ' ', nibbles);
_itoa_word ((unsigned long int) l->l_entry, &buf1[nibbles], 16, 0);
_itoa_word ((unsigned long int) l->l_phdr, &buf2[nibbles], 16, 0);
_itoa_word (l->l_phnum, &buf3[nibbles], 10, 0);
_dl_debug_message (1, " entry: 0x", buf1, " phdr: 0x", buf2,
" phnum: ", buf3, "\n\n", NULL);
}
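/* Read the pointers out of the dynamic section and set up the symbol
hash table if the object has one. */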
elf_get_dynamic_info (l);
if (l->l_info[DT_HASH])
_dl_setup_hash (l);
/* If this object has DT_SYMBOLIC set, modify its scope now. We don't
have to do this for the main map. */
if (l->l_info[DT_SYMBOLIC] && &l->l_searchlist != l->l_scope[0])
{
/* Create an appropriate searchlist. It contains only this map.
XXX This is the definition of DT_SYMBOLIC in SysVr4. The old
GNU ld.so implementation had a different interpretation which
is more reasonable. We are prepared to add this possibility
back as part of a GNU extension of the ELF format. */
l->l_symbolic_searchlist.r_list =
(struct link_map **) malloc (sizeof (struct link_map *));
if (l->l_symbolic_searchlist.r_list == NULL)
LOSE (ENOMEM, "cannot create searchlist");
l->l_symbolic_searchlist.r_list[0] = l;
l->l_symbolic_searchlist.r_nlist = 1;
l->l_symbolic_searchlist.r_duplist = l->l_symbolic_searchlist.r_list;
l->l_symbolic_searchlist.r_nduplist = 1;
/* Now move the existing entries one back. */
memmove (&l->l_scope[1], &l->l_scope[0],
sizeof (l->l_scope) - sizeof (l->l_scope[0]));
/* Now add the new entry. */
l->l_scope[0] = &l->l_symbolic_searchlist;
}
/* Finally the file information. */
l->l_dev = st.st_dev;
l->l_ino = st.st_ino;
return l;
}
/* Print search path. */
static void
print_search_path (struct r_search_path_elem **list,
const char *what, const char *name)
{
char buf[max_dirnamelen + max_capstrlen];
int first = 1;
_dl_debug_message (1, " search path=", NULL);
while (*list != NULL && (*list)->what == what) /* Yes, ==. */
{
char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
size_t cnt;
for (cnt = 0; cnt < ncapstr; ++cnt)
if ((*list)->status[cnt] != nonexisting)
{
char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
cp[0] = '\0';
else
cp[-1] = '\0';
_dl_debug_message (0, first ? "" : ":", buf, NULL);
first = 0;
}
++list;
}
if (name != NULL)
_dl_debug_message (0, "\t\t(", what, " from file ",
name[0] ? name : _dl_argv[0], ")\n", NULL);
else
_dl_debug_message (0, "\t\t(", what, ")\n", NULL);
}
/* Try to open NAME in one of the directories in DIRS.
Return the fd, or -1. If successful, fill in *REALNAME
with the malloc'd full directory name. */
static int
open_path (const char *name, size_t namelen, int preloaded,
struct r_search_path_elem **dirs,
char **realname)
{
char *buf;
int fd = -1;
const char *current_what = NULL;
if (dirs == NULL || *dirs == NULL)
{
__set_errno (ENOENT);
return -1;
}
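/* A candidate file name is built as directory name + hardware
capability subdirectory + NAME, hence this buffer size. */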
buf = alloca (max_dirnamelen + max_capstrlen + namelen);
do
{
struct r_search_path_elem *this_dir = *dirs;
size_t buflen = 0;
size_t cnt;
char *edp;
/* If we are debugging the search for libraries print the path
now if it hasn't happened yet. */
if (_dl_debug_libs && current_what != this_dir->what)
{
current_what = this_dir->what;
print_search_path (dirs, current_what, this_dir->where);
}
edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
{
/* Skip this directory if we know it does not exist. */
if (this_dir->status[cnt] == nonexisting)
continue;
buflen =
((char *) __mempcpy (__mempcpy (edp,
capstr[cnt].str, capstr[cnt].len),
name, namelen)
- buf);
/* Print name we try if this is wanted. */
if (_dl_debug_libs)
_dl_debug_message (1, " trying file=", buf, "\n", NULL);
fd = __open (buf, O_RDONLY);
if (this_dir->status[cnt] == unknown)
{
if (fd != -1)
this_dir->status[cnt] = existing;
else
{
/* We failed to open the machine-dependent library. Let's
test whether the directory exists at all. */
struct stat st;
buf[buflen - namelen - 1] = '\0';
if (__xstat (_STAT_VER, buf, &st) != 0
|| ! S_ISDIR (st.st_mode))
/* The path does not exist or is not a directory. */
this_dir->status[cnt] = nonexisting;
else
this_dir->status[cnt] = existing;
}
}
if (fd != -1 && preloaded && __libc_enable_secure)
{
/* This is an extra security effort to make sure nobody can
preload broken shared objects which are in the trusted
directories and so exploit the bugs. */
struct stat st;
if (__fxstat (_STAT_VER, fd, &st) != 0
|| (st.st_mode & S_ISUID) == 0)
{
/* The shared object cannot be tested for being SUID
or this bit is not set. In this case we must not
use this object. */
__close (fd);
fd = -1;
/* We simply ignore the file, signal this by setting
the error value which would have been set by `open'. */
errno = ENOENT;
}
}
}
if (fd != -1)
{
*realname = malloc (buflen);
if (*realname != NULL)
{
memcpy (*realname, buf, buflen);
return fd;
}
else
{
/* No memory for the name, we certainly won't be able
to load and link it. */
__close (fd);
return -1;
}
}
if (errno != ENOENT && errno != EACCES)
/* The file exists and is readable, but something went wrong. */
return -1;
}
while (*++dirs != NULL);
return -1;
}
/* Map in the shared object file NAME. */
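/* A request is first checked against the objects already loaded,
matching either one of their recorded names or their DT_SONAME.
For names without a '/' the file is then searched for in this
order: the DT_RPATH of the loading object and of its loaders
(only when no DT_RUNPATH is present), the DT_RPATH of the main
executable, LD_LIBRARY_PATH, the DT_RUNPATH of the loading object,
/etc/ld.so.cache, and finally the built-in system directories. */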
struct link_map *
internal_function
_dl_map_object (struct link_map *loader, const char *name, int preloaded,
int type, int trace_mode)
{
int fd;
char *realname;
char *name_copy;
struct link_map *l;
/* Look for this name among those already loaded. */
for (l = _dl_loaded; l; l = l->l_next)
{
/* If the requested name matches the soname of a loaded object,
use that object. Elide this check for names that have not
yet been opened. */
if (l->l_opencount <= 0)
continue;
if (!_dl_name_match_p (name, l))
{
const char *soname;
if (l->l_info[DT_SONAME] == NULL)
continue;
soname = (const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_SONAME]->d_un.d_val);
if (strcmp (name, soname) != 0)
continue;
/* We have a match on a new name -- cache it. */
add_name_to_object (l, soname);
}
/* We have a match -- bump the reference count and return it. */
++l->l_opencount;
return l;
}
/* Display information if we are debugging. */
if (_dl_debug_files && loader != NULL)
_dl_debug_message (1, "\nfile=", name, "; needed by ",
loader->l_name[0] ? loader->l_name : _dl_argv[0],
"\n", NULL);
if (strchr (name, '/') == NULL)
{
/* Search for NAME in several places. */
size_t namelen = strlen (name) + 1;
if (_dl_debug_libs)
_dl_debug_message (1, "find library=", name, "; searching\n", NULL);
fd = -1;
/* When the object has the RUNPATH information we don't use any
RPATHs. */
if (loader != NULL && loader->l_info[DT_RUNPATH] == NULL)
{
/* First try the DT_RPATH of the dependent object that caused NAME
to be loaded. Then that object's dependent, and on up. */
for (l = loader; fd == -1 && l; l = l->l_loader)
if (l->l_info[DT_RPATH])
{
/* Make sure the cache information is available. */
if (l->l_rpath_dirs == NULL)
{
size_t ptrval = (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RPATH]->d_un.d_val);
l->l_rpath_dirs =
decompose_rpath ((const char *) ptrval, l, "RPATH");
}
if (l->l_rpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, l->l_rpath_dirs,
&realname);
}
/* If dynamically linked, try the DT_RPATH of the executable
itself. */
l = _dl_loaded;
if (fd == -1 && l && l->l_type != lt_loaded && l != loader
&& l->l_rpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, l->l_rpath_dirs,
&realname);
}
/* Try the LD_LIBRARY_PATH environment variable. */
if (fd == -1 && env_path_list != NULL)
fd = open_path (name, namelen, preloaded, env_path_list, &realname);
/* Look at the RUNPATH information for this binary. */
if (loader != NULL && loader->l_info[DT_RUNPATH])
{
/* Make sure the cache information is available. */
if (loader->l_runpath_dirs == NULL)
{
size_t ptrval = (loader->l_info[DT_STRTAB]->d_un.d_ptr
+ loader->l_info[DT_RUNPATH]->d_un.d_val);
loader->l_runpath_dirs =
decompose_rpath ((const char *) ptrval, loader, "RUNPATH");
}
if (loader->l_runpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, loader->l_runpath_dirs,
&realname);
}
if (fd == -1)
{
/* Check the list of libraries in the file /etc/ld.so.cache,
for compatibility with Linux's ldconfig program. */
extern const char *_dl_load_cache_lookup (const char *name);
const char *cached = _dl_load_cache_lookup (name);
if (cached)
{
fd = __open (cached, O_RDONLY);
if (fd != -1)
{
realname = local_strdup (cached);
if (realname == NULL)
{
__close (fd);
fd = -1;
}
}
}
}
/* Finally, try the default path. */
if (fd == -1)
fd = open_path (name, namelen, preloaded, rtld_search_dirs, &realname);
/* Add another newline when we are tracing the library loading. */
if (_dl_debug_libs)
_dl_debug_message (1, "\n", NULL);
}
else
{
/* The path may contain dynamic string tokens. */
realname = (loader
? expand_dynamic_string_token (loader, name)
: local_strdup (name));
if (realname == NULL)
fd = -1;
else
{
fd = __open (realname, O_RDONLY);
if (fd == -1)
free (realname);
}
}
if (fd == -1)
{
if (trace_mode)
{
/* We haven't found an appropriate library. But since we
are only interested in the list of libraries this isn't
so severe. Fake an entry with all the information we
have. */
static const Elf_Symndx dummy_bucket = STN_UNDEF;
/* Enter the new object in the list of loaded objects. */
if ((name_copy = local_strdup (name)) == NULL
|| (l = _dl_new_object (name_copy, name, type, loader)) == NULL)
_dl_signal_error (ENOMEM, name,
"cannot create shared object descriptor");
/* We use an opencount of 0 as a sign for the faked entry.
Since the descriptor is initialized with zero we do not
have do this here.
l->l_opencount = 0;
l->l_reserved = 0; */
l->l_buckets = &dummy_bucket;
l->l_nbuckets = 1;
l->l_relocated = 1;
return l;
}
else
_dl_signal_error (errno, name, "cannot open shared object file");
}
return _dl_map_object_from_fd (name, fd, realname, loader, type);
}
code_after:

/* Map in a shared object's segments from the file.
Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <elf/ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "dynamic-link.h"
#include <stdio-common/_itoa.h>
#include <dl-dst.h>
/* On some systems, no flag bits are given to specify file mapping. */
#ifndef MAP_FILE
# define MAP_FILE 0
#endif
/* The right way to map in the shared library files is MAP_COPY, which
makes a virtual copy of the data at the time of the mmap call; this
guarantees the mapped pages will be consistent even if the file is
overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
get is MAP_PRIVATE, which copies each page when it is modified; this
means if the file is overwritten, we may at some point get some pages
from the new version after starting with pages from the old version. */
#ifndef MAP_COPY
# define MAP_COPY MAP_PRIVATE
#endif
/* Some systems link their relocatable objects at a base address other
than 0. We want to know that base address so that we can subtract it
from the segment addresses during mapping.
This results in a more efficient address space usage. Defaults to
zero for almost all systems. */
#ifndef MAP_BASE_ADDR
# define MAP_BASE_ADDR(l) 0
#endif
#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
# define byteorder ELFDATA2MSB
# define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
# define byteorder ELFDATA2LSB
# define byteorder_name "little-endian"
#else
# error "Unknown BYTE_ORDER " BYTE_ORDER
# define byteorder ELFDATANONE
#endif
#define STRING(x) __STRING (x)
#ifdef MAP_ANON
/* The fd is not examined when using MAP_ANON. */
# define ANONFD -1
#else
int _dl_zerofd = -1;
# define ANONFD _dl_zerofd
#endif
/* Handle situations where we have a preferred location in memory for
the shared objects. */
#ifdef ELF_PREFERRED_ADDRESS_DATA
ELF_PREFERRED_ADDRESS_DATA;
#endif
#ifndef ELF_PREFERRED_ADDRESS
# define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
#endif
#ifndef ELF_FIXED_ADDRESS
# define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
#endif
size_t _dl_pagesize;
extern const char *_dl_platform;
extern size_t _dl_platformlen;
/* This is the decomposed LD_LIBRARY_PATH search path. */
static struct r_search_path_elem **env_path_list;
/* List of the hardware capabilities we might end up using. */
static const struct r_strlenpair *capstr;
static size_t ncapstr;
static size_t max_capstrlen;
const unsigned char _dl_pf_to_prot[8] =
{
[0] = PROT_NONE,
[PF_R] = PROT_READ,
[PF_W] = PROT_WRITE,
[PF_R | PF_W] = PROT_READ | PROT_WRITE,
[PF_X] = PROT_EXEC,
[PF_R | PF_X] = PROT_READ | PROT_EXEC,
[PF_W | PF_X] = PROT_WRITE | PROT_EXEC,
[PF_R | PF_W | PF_X] = PROT_READ | PROT_WRITE | PROT_EXEC
};
/* Get the generated information about the trusted directories. */
#include "trusted-dirs.h"
static const char system_dirs[] = SYSTEM_DIRS;
static const size_t system_dirs_len[] =
{
SYSTEM_DIRS_LEN
};
/* This function has no public prototype. */
extern ssize_t __libc_read (int, void *, size_t);
/* Local version of `strdup' function. */
static inline char *
local_strdup (const char *s)
{
size_t len = strlen (s) + 1;
void *new = malloc (len);
if (new == NULL)
return NULL;
return (char *) memcpy (new, s, len);
}
size_t
_dl_dst_count (const char *name, int is_path)
{
const char *const start = name;
size_t cnt = 0;
do
{
size_t len = 1;
/* For SUID/SGID programs $ORIGIN is only recognized when it makes up
the entire path element. Note that it is not a bug that the strings
in the first two `strncmp' calls are longer than the sequence which
is actually tested. */
if ((((strncmp (&name[1], "ORIGIN}", 6) == 0
&& (!__libc_enable_secure
|| ((name[7] == '\0' || (is_path && name[7] == ':'))
&& (name == start || (is_path && name[-1] == ':'))))
&& (len = 7) != 0)
|| (strncmp (&name[1], "PLATFORM}", 8) == 0 && (len = 9) != 0))
&& (name[len] == '\0' || name[len] == '/'
|| (is_path && name[len] == ':')))
|| (name[1] == '{'
&& ((strncmp (&name[2], "ORIGIN}", 7) == 0
&& (!__libc_enable_secure
|| ((name[9] == '\0' || (is_path && name[9] == ':'))
&& (name == start || (is_path && name[-1] == ':'))))
&& (len = 9) != 0)
|| (strncmp (&name[2], "PLATFORM}", 9) == 0
&& (len = 11) != 0))))
++cnt;
name = strchr (name + len, '$');
}
while (name != NULL);
return cnt;
}
char *
_dl_dst_substitute (struct link_map *l, const char *name, char *result,
int is_path)
{
const char *const start = name;
char *last_elem, *wp;
/* Now fill the result path. While copying over the string we keep
track of the start of the last path element. When we come across
a DST we copy over the value or (if the value is not available)
leave the entire path element out. */
last_elem = wp = result;
do
{
if (*name == '$')
{
const char *repl;
size_t len;
/* Note that it is not a bug that the strings in the first two `strncmp'
calls are longer than the sequence which is actually tested. */
if ((((strncmp (&name[1], "ORIGIN}", 6) == 0 && (len = 7) != 0)
|| (strncmp (&name[1], "PLATFORM}", 8) == 0 && (len = 9) != 0))
&& (name[len] == '\0' || name[len] == '/'
|| (is_path && name[len] == ':')))
|| (name[1] == '{'
&& ((strncmp (&name[2], "ORIGIN}", 7) == 0 && (len = 9) != 0)
|| (strncmp (&name[2], "PLATFORM}", 9) == 0
&& (len = 11) != 0))))
{
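/* For SUID/SGID programs $ORIGIN is substituted only when the token
makes up the whole path element; otherwise REPL stays NULL and the
element is dropped below. */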
repl = ((len == 7 || name[2] == 'O')
? (__libc_enable_secure
&& ((name[len] != '\0'
&& (!is_path || name[len] != ':'))
|| (name != start
&& (!is_path || name[-1] != ':')))
? NULL : l->l_origin)
: _dl_platform);
if (repl != NULL && repl != (const char *) -1)
{
wp = __stpcpy (wp, repl);
name += len;
}
else
{
/* We cannot use this path element, the value of the
replacement is unknown. */
wp = last_elem;
name += len;
while (*name != '\0' && (!is_path || *name != ':'))
++name;
}
}
else
/* No DST we recognize. */
*wp++ = *name++;
}
else if (is_path && *name == ':')
{
*wp++ = *name++;
last_elem = wp;
}
else
*wp++ = *name++;
}
while (*name != '\0');
*wp = '\0';
return result;
}
/* Return copy of argument with all recognized dynamic string tokens
($ORIGIN and $PLATFORM for now) replaced. On some platforms it
might not be possible to determine the path from which the object
belonging to the map is loaded. In this case the path element
containing $ORIGIN is left out. */
static char *
expand_dynamic_string_token (struct link_map *l, const char *s)
{
/* We make two runs over the string. First we determine how large the
resulting string is and then we copy it over. Since this is not a
frequently executed operation we are looking here not for performance
but rather for code size. */
size_t cnt;
size_t total;
char *result;
/* Determine the number of DST elements. */
cnt = DL_DST_COUNT (s, 1);
/* If we do not have to replace anything simply copy the string. */
if (cnt == 0)
return local_strdup (s);
/* Determine the length of the substituted string. */
total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
/* Allocate the necessary memory. */
result = (char *) malloc (total + 1);
if (result == NULL)
return NULL;
return DL_DST_SUBSTITUTE (l, s, result, 1);
}
/* Add `name' to the list of names for a particular shared object.
`name' is expected to have been allocated with malloc and will
be freed if the shared object already has this name.
Does nothing if the object already has this name. */
static void
internal_function
add_name_to_object (struct link_map *l, const char *name)
{
struct libname_list *lnp, *lastp;
struct libname_list *newname;
size_t name_len;
lastp = NULL;
for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
if (strcmp (name, lnp->name) == 0)
return;
name_len = strlen (name) + 1;
newname = malloc (sizeof *newname + name_len);
if (newname == NULL)
{
/* No more memory. */
_dl_signal_error (ENOMEM, name, "cannot allocate name record");
return;
}
/* The object should have a libname set from _dl_new_object. */
assert (lastp != NULL);
newname->name = memcpy (newname + 1, name, name_len);
newname->next = NULL;
lastp->next = newname;
}
/* All known directories in sorted order. */
static struct r_search_path_elem *all_dirs;
/* Standard search directories. */
static struct r_search_path_elem **rtld_search_dirs;
static size_t max_dirnamelen;
static inline struct r_search_path_elem **
fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
int check_trusted, const char *what, const char *where)
{
char *cp;
size_t nelems = 0;
while ((cp = __strsep (&rpath, sep)) != NULL)
{
struct r_search_path_elem *dirp;
size_t len = strlen (cp);
/* `strsep' can pass an empty string. This has to be
interpreted as `use the current directory'. */
if (len == 0)
{
static const char curwd[] = "./";
cp = (char *) curwd;
}
/* Remove trailing slashes (except for "/"). */
while (len > 1 && cp[len - 1] == '/')
--len;
/* Now add one if there is none so far. */
if (len > 0 && cp[len - 1] != '/')
cp[len++] = '/';
/* Make sure we don't use untrusted directories if we run SUID. */
if (check_trusted)
{
const char *trun = system_dirs;
size_t idx;
/* All trusted directories must be complete names. */
if (cp[0] != '/')
continue;
for (idx = 0;
idx < sizeof (system_dirs_len) / sizeof (system_dirs_len[0]);
++idx)
{
if (len == system_dirs_len[idx] && memcmp (trun, cp, len) == 0)
/* Found it. */
break;
trun += system_dirs_len[idx] + 1;
}
if (idx == sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
/* It's not a trusted directory, skip it. */
continue;
}
/* See if this directory is already known. */
for (dirp = all_dirs; dirp != NULL; dirp = dirp->next)
if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
break;
if (dirp != NULL)
{
/* It is available, see whether it's on our own list. */
size_t cnt;
for (cnt = 0; cnt < nelems; ++cnt)
if (result[cnt] == dirp)
break;
if (cnt == nelems)
result[nelems++] = dirp;
}
else
{
size_t cnt;
/* It's a new directory. Create an entry and add it. */
dirp = (struct r_search_path_elem *)
malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status));
if (dirp == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
dirp->dirname = cp;
dirp->dirnamelen = len;
if (len > max_dirnamelen)
max_dirnamelen = len;
/* We have to make sure all the relative directories are never
ignored. The current directory might change and all our
saved information would be void. */
if (cp[0] != '/')
for (cnt = 0; cnt < ncapstr; ++cnt)
dirp->status[cnt] = existing;
else
for (cnt = 0; cnt < ncapstr; ++cnt)
dirp->status[cnt] = unknown;
dirp->what = what;
dirp->where = where;
dirp->next = all_dirs;
all_dirs = dirp;
/* Put it in the result array. */
result[nelems++] = dirp;
}
}
/* Terminate the array. */
result[nelems] = NULL;
return result;
}
static struct r_search_path_elem **
internal_function
decompose_rpath (const char *rpath, struct link_map *l, const char *what)
{
/* Make a copy we can work with. */
const char *where = l->l_name;
char *copy;
char *cp;
struct r_search_path_elem **result;
size_t nelems;
/* First see whether we must forget the RUNPATH and RPATH from this
object. */
if (_dl_inhibit_rpath != NULL && !__libc_enable_secure)
{
const char *found = strstr (_dl_inhibit_rpath, where);
if (found != NULL)
{
size_t len = strlen (where);
if ((found == _dl_inhibit_rpath || found[-1] == ':')
&& (found[len] == '\0' || found[len] == ':'))
{
/* This object is on the list of objects for which the
RUNPATH and RPATH must not be used. */
result = (struct r_search_path_elem **)
malloc (sizeof (*result));
if (result == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
result[0] = NULL;
return result;
}
}
}
/* Make a writable copy. At the same time expand possible dynamic
string tokens. */
copy = expand_dynamic_string_token (l, rpath);
if (copy == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create RUNPATH/RPATH copy");
/* Count the number of necessary elements in the result array. */
nelems = 0;
for (cp = copy; *cp != '\0'; ++cp)
if (*cp == ':')
++nelems;
/* Allocate room for the result. NELEMS + 1 is an upper limit for the
number of necessary entries. */
result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
* sizeof (*result));
if (result == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create cache for search path");
return fillin_rpath (copy, result, ":", 0, what, where);
}
void
internal_function
_dl_init_paths (const char *llp)
{
size_t idx;
const char *strp;
struct r_search_path_elem *pelem, **aelem;
size_t round_size;
#ifdef PIC
struct link_map *l;
#endif
/* Fill in the information about the application's RPATH and the
directories addressed by the LD_LIBRARY_PATH environment variable. */
/* Get the capabilities. */
capstr = _dl_important_hwcaps (_dl_platform, _dl_platformlen,
&ncapstr, &max_capstrlen);
/* First set up the rest of the default search directory entries. */
aelem = rtld_search_dirs = (struct r_search_path_elem **)
malloc ((sizeof (system_dirs_len) / sizeof (system_dirs_len[0]) + 1)
* sizeof (struct r_search_path_elem *));
if (rtld_search_dirs == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create search path array");
round_size = ((2 * sizeof (struct r_search_path_elem) - 1
+ ncapstr * sizeof (enum r_dir_status))
/ sizeof (struct r_search_path_elem));
rtld_search_dirs[0] = (struct r_search_path_elem *)
malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]) - 1)
* round_size * sizeof (struct r_search_path_elem));
if (rtld_search_dirs[0] == NULL)
_dl_signal_error (ENOMEM, NULL, "cannot create cache for search path");
pelem = all_dirs = rtld_search_dirs[0];
strp = system_dirs;
idx = 0;
do
{
size_t cnt;
*aelem++ = pelem;
pelem->what = "system search path";
pelem->where = NULL;
pelem->dirname = strp;
pelem->dirnamelen = system_dirs_len[idx];
strp += system_dirs_len[idx] + 1;
if (pelem->dirname[0] != '/')
for (cnt = 0; cnt < ncapstr; ++cnt)
pelem->status[cnt] = existing;
else
for (cnt = 0; cnt < ncapstr; ++cnt)
pelem->status[cnt] = unknown;
pelem->next = (++idx == (sizeof (system_dirs_len)
/ sizeof (system_dirs_len[0]))
? NULL : (pelem + round_size));
pelem += round_size;
}
while (idx < sizeof (system_dirs_len) / sizeof (system_dirs_len[0]));
max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
*aelem = NULL;
#ifdef PIC
/* This points to the map of the main object. */
l = _dl_loaded;
if (l != NULL)
{
assert (l->l_type != lt_loaded);
if (l->l_info[DT_RUNPATH])
{
/* Allocate room for the search path and fill in information
from RUNPATH. */
l->l_runpath_dirs =
decompose_rpath ((const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RUNPATH]->d_un.d_val),
l, "RUNPATH");
/* The RPATH is ignored. */
l->l_rpath_dirs = NULL;
}
else
{
l->l_runpath_dirs = NULL;
if (l->l_info[DT_RPATH])
/* Allocate room for the search path and fill in information
from RPATH. */
l->l_rpath_dirs =
decompose_rpath ((const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RPATH]->d_un.d_val),
l, "RPATH");
else
l->l_rpath_dirs = NULL;
}
}
#endif /* PIC */
if (llp != NULL && *llp != '\0')
{
size_t nllp;
const char *cp = llp;
/* Decompose the LD_LIBRARY_PATH contents. First determine how many
elements it has. */
nllp = 1;
while (*cp)
{
if (*cp == ':' || *cp == ';')
++nllp;
++cp;
}
env_path_list = (struct r_search_path_elem **)
malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
if (env_path_list == NULL)
_dl_signal_error (ENOMEM, NULL,
"cannot create cache for search path");
(void) fillin_rpath (local_strdup (llp), env_path_list, ":;",
__libc_enable_secure, "LD_LIBRARY_PATH", NULL);
}
}
/* Think twice before changing anything in this function. It is placed
here and prepared using the `alloca' magic to prevent it from being
inlined. The function is only called in case of an error. But then
performance does not count. The function used to be "inlinable" and
the compiler did so all the time. This increased the code size for
absolutely no good reason. */
#define LOSE(code, s) lose (code, fd, name, realname, l, s)
static void
__attribute__ ((noreturn))
lose (int code, int fd, const char *name, char *realname, struct link_map *l,
const char *msg)
{
/* The use of `alloca' here looks ridiculous but it helps. The goal
is to prevent the function from being inlined. There is no official
way to do this so we use this trick. gcc never inlines functions
which use `alloca'. */
int *a = alloca (sizeof (int));
a[0] = fd;
(void) __close (a[0]);
if (l != NULL)
{
/* Remove the stillborn object from the list and free it. */
if (l->l_prev)
l->l_prev->l_next = l->l_next;
if (l->l_next)
l->l_next->l_prev = l->l_prev;
free (l);
}
free (realname);
_dl_signal_error (code, name, msg);
}
/* Map in the shared object NAME, actually located in REALNAME, and already
opened on FD. */
#ifndef EXTERNAL_MAP_FROM_FD
static
#endif
struct link_map *
_dl_map_object_from_fd (const char *name, int fd, char *realname,
struct link_map *loader, int l_type)
{
/* This is the expected ELF header. */
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
#ifndef VALID_ELF_HEADER
# define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
# define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
# define VALID_ELF_ABIVERSION(ver) (ver == 0)
#endif
static const unsigned char expected[EI_PAD] =
{
[EI_MAG0] = ELFMAG0,
[EI_MAG1] = ELFMAG1,
[EI_MAG2] = ELFMAG2,
[EI_MAG3] = ELFMAG3,
[EI_CLASS] = ELFW(CLASS),
[EI_DATA] = byteorder,
[EI_VERSION] = EV_CURRENT,
[EI_OSABI] = ELFOSABI_SYSV,
[EI_ABIVERSION] = 0
};
struct link_map *l = NULL;
inline caddr_t map_segment (ElfW(Addr) mapstart, size_t len,
int prot, int fixed, off_t offset)
{
caddr_t mapat = __mmap ((caddr_t) mapstart, len, prot,
fixed|MAP_COPY|MAP_FILE,
fd, offset);
if (mapat == MAP_FAILED)
LOSE (errno, "failed to map segment from shared object");
return mapat;
}
const ElfW(Ehdr) *header;
const ElfW(Phdr) *phdr;
const ElfW(Phdr) *ph;
size_t maplength;
int type;
char *readbuf;
ssize_t readlength;
struct stat st;
/* Get file information. */
if (__fxstat (_STAT_VER, fd, &st) < 0)
LOSE (errno, "cannot stat shared object");
/* Look again to see if the real name matched another already loaded. */
for (l = _dl_loaded; l; l = l->l_next)
if (l->l_ino == st.st_ino && l->l_dev == st.st_dev)
{
/* The object is already loaded.
Just bump its reference count and return it. */
__close (fd);
/* If the name is not in the list of names for this object add
it. */
free (realname);
add_name_to_object (l, name);
++l->l_opencount;
return l;
}
/* Print debugging message. */
if (_dl_debug_files)
_dl_debug_message (1, "file=", name, "; generating link map\n", NULL);
/* Read the header directly. */
readbuf = alloca (_dl_pagesize);
readlength = __libc_read (fd, readbuf, _dl_pagesize);
if (readlength < (ssize_t) sizeof (*header))
LOSE (errno, "cannot read file data");
header = (void *) readbuf;
/* Check the header for basic validity. */
if (__builtin_expect (!VALID_ELF_HEADER (header->e_ident, expected, EI_PAD),
0))
{
/* Something is wrong. */
if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
((ELFMAG0 << (EI_MAG0 * 8)) |
(ELFMAG1 << (EI_MAG1 * 8)) |
(ELFMAG2 << (EI_MAG2 * 8)) |
(ELFMAG3 << (EI_MAG3 * 8)))
#else
((ELFMAG0 << (EI_MAG3 * 8)) |
(ELFMAG1 << (EI_MAG2 * 8)) |
(ELFMAG2 << (EI_MAG1 * 8)) |
(ELFMAG3 << (EI_MAG0 * 8)))
#endif
)
LOSE (0, "invalid ELF header");
if (header->e_ident[EI_CLASS] != ELFW(CLASS))
LOSE (0, "ELF file class not " STRING(__ELF_NATIVE_CLASS) "-bit");
if (header->e_ident[EI_DATA] != byteorder)
LOSE (0, "ELF file data encoding not " byteorder_name);
if (header->e_ident[EI_VERSION] != EV_CURRENT)
LOSE (0, "ELF file version ident not " STRING(EV_CURRENT));
/* XXX We should be able to set system-specific versions which are
allowed here. */
if (!VALID_ELF_OSABI (header->e_ident[EI_OSABI]))
LOSE (0, "ELF file OS ABI invalid.");
if (!VALID_ELF_ABIVERSION (header->e_ident[EI_ABIVERSION]))
LOSE (0, "ELF file ABI version invalid.");
LOSE (0, "internal error");
}
if (__builtin_expect (header->e_version, EV_CURRENT) != EV_CURRENT)
LOSE (0, "ELF file version not " STRING(EV_CURRENT));
if (! __builtin_expect (elf_machine_matches_host (header->e_machine), 1))
LOSE (0, "ELF file machine architecture not " ELF_MACHINE_NAME);
if (__builtin_expect (header->e_phentsize, sizeof (ElfW(Phdr)))
!= sizeof (ElfW(Phdr)))
LOSE (0, "ELF file's phentsize not the expected size");
#ifndef MAP_ANON
# define MAP_ANON 0
if (_dl_zerofd == -1)
{
_dl_zerofd = _dl_sysdep_open_zero_fill ();
if (_dl_zerofd == -1)
{
__close (fd);
_dl_signal_error (errno, NULL, "cannot open zero fill device");
}
}
#endif
/* Enter the new object in the list of loaded objects. */
l = _dl_new_object (realname, name, l_type, loader);
if (__builtin_expect (! l, 0))
LOSE (ENOMEM, "cannot create shared object descriptor");
l->l_opencount = 1;
/* Extract the remaining details we need from the ELF header
and then read in the program header table. */
l->l_entry = header->e_entry;
type = header->e_type;
l->l_phnum = header->e_phnum;
maplength = header->e_phnum * sizeof (ElfW(Phdr));
if (header->e_phoff + maplength <= readlength)
phdr = (void *) (readbuf + header->e_phoff);
else
{
phdr = alloca (maplength);
/* Note the argument order: lseek takes the offset before the whence
flag. */
__lseek (fd, header->e_phoff, SEEK_SET);
if (__libc_read (fd, (void *) phdr, maplength) != maplength)
LOSE (errno, "cannot read file data");
}
{
/* Scan the program header table, collecting its load commands. */
struct loadcmd
{
ElfW(Addr) mapstart, mapend, dataend, allocend;
off_t mapoff;
int prot;
} loadcmds[l->l_phnum], *c;
size_t nloadcmds = 0;
/* The struct is initialized to zero so this is not necessary:
l->l_ld = 0;
l->l_phdr = 0;
l->l_addr = 0; */
for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
switch (ph->p_type)
{
/* These entries tell us where to find things once the file's
segments are mapped in. We record the addresses it says
verbatim, and later correct for the run-time load address. */
case PT_DYNAMIC:
l->l_ld = (void *) ph->p_vaddr;
break;
case PT_PHDR:
l->l_phdr = (void *) ph->p_vaddr;
break;
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later. */
if (ph->p_align % _dl_pagesize != 0)
LOSE (0, "ELF load command alignment not page-aligned");
if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
LOSE (0, "ELF load command address/offset not properly aligned");
{
struct loadcmd *c = &loadcmds[nloadcmds++];
c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
& ~(_dl_pagesize - 1));
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
c->mapoff = ph->p_offset & ~(ph->p_align - 1);
/* Optimize a common case. */
if ((PF_R | PF_W | PF_X) == 7
&& (PROT_READ | PROT_WRITE | PROT_EXEC) == 7)
c->prot = _dl_pf_to_prot[ph->p_flags & (PF_R | PF_W | PF_X)];
else
{
c->prot = 0;
if (ph->p_flags & PF_R)
c->prot |= PROT_READ;
if (ph->p_flags & PF_W)
c->prot |= PROT_WRITE;
if (ph->p_flags & PF_X)
c->prot |= PROT_EXEC;
}
break;
}
}
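/* Worked example (illustrative values only): assuming a page size of
0x1000, a PT_LOAD entry with p_vaddr 0x1234, p_offset 0x234,
p_filesz 0x5000 and p_align 0x1000 yields mapstart 0x1000,
mapend 0x7000 (0x1234 + 0x5000 rounded up to a page boundary),
dataend 0x6234 and mapoff 0x0. */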
/* Now process the load commands and map segments into memory. */
c = loadcmds;
/* Length of the sections to be loaded. */
maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
if (type == ET_DYN || type == ET_REL)
{
/* This is a position-independent shared object. We can let the
kernel map it anywhere it likes, but we must have space for all
the segments in their specified positions relative to the first.
So we map the first segment without MAP_FIXED, but with its
extent increased to cover all the segments. Then we remove
access from the excess portion, and there is known to be sufficient
space there to remap the later segments.
As a refinement, sometimes we have an address that we would
prefer to map such objects at; but this is only a preference,
the OS can do whatever it likes. */
caddr_t mapat;
ElfW(Addr) mappref;
mappref = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
- MAP_BASE_ADDR (l));
mapat = map_segment (mappref, maplength, c->prot, 0, c->mapoff);
l->l_addr = (ElfW(Addr)) mapat - c->mapstart;
/* Change protection on the excess portion to disallow all access;
the portions we do not remap later will be inaccessible as if
unallocated. Then jump into the normal segment-mapping loop to
handle the portion of the segment past the end of the file
mapping. */
__mprotect ((caddr_t) (l->l_addr + c->mapend),
loadcmds[nloadcmds - 1].allocend - c->mapend,
0);
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
goto postmap;
}
else
{
/* Notify ELF_PREFERRED_ADDRESS that we have to load this one
fixed. */
ELF_FIXED_ADDRESS (loader, c->mapstart);
}
/* Remember which part of the address space this object uses. */
l->l_map_start = c->mapstart + l->l_addr;
l->l_map_end = l->l_map_start + maplength;
while (c < &loadcmds[nloadcmds])
{
if (c->mapend > c->mapstart)
/* Map the segment contents from the file. */
map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
c->prot, MAP_FIXED, c->mapoff);
postmap:
if (l->l_phdr == 0
&& c->mapoff <= header->e_phoff
&& (c->mapend - c->mapstart + c->mapoff
>= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
/* Found the program header in this segment. */
l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
if (c->allocend > c->dataend)
{
/* Extra zero pages should appear at the end of this segment,
after the data mapped from the file. */
ElfW(Addr) zero, zeroend, zeropage;
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);
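/* Illustration (made-up numbers): with a page size of 0x1000,
dataend 0x6234 and allocend 0x9000, `zeropage' is rounded up to
l_addr + 0x7000; the memset below clears the tail of the partly
used page up to 0x7000, and the anonymous mapping further down
provides the zero-fill pages from 0x7000 to 0x9000. */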
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
We can just zero it. */
zeropage = zeroend;
if (zeropage > zero)
{
/* Zero the final part of the last page of the segment. */
if ((c->prot & PROT_WRITE) == 0)
{
/* Dag nab it. */
if (__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
_dl_pagesize, c->prot|PROT_WRITE) < 0)
LOSE (errno, "cannot change memory protections");
}
memset ((void *) zero, 0, zeropage - zero);
if ((c->prot & PROT_WRITE) == 0)
__mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
_dl_pagesize, c->prot);
}
if (zeroend > zeropage)
{
/* Map the remaining zero pages in from the zero fill FD. */
caddr_t mapat;
mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
ANONFD, 0);
if (mapat == MAP_FAILED)
LOSE (errno, "cannot map zero-fill pages");
}
}
++c;
}
if (l->l_phdr == NULL)
{
/* The program header is not contained in any of the segments.
We have to allocate memory ourselves and copy it over from
our temporary place. */
ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
* sizeof (ElfW(Phdr)));
if (newp == NULL)
LOSE (ENOMEM, "cannot allocate memory for program header");
l->l_phdr = memcpy (newp, phdr,
(header->e_phnum * sizeof (ElfW(Phdr))));
l->l_phdr_allocated = 1;
}
else
/* Adjust the PT_PHDR value by the runtime load address. */
(ElfW(Addr)) l->l_phdr += l->l_addr;
}
/* We are done mapping in the file. We no longer need the descriptor. */
__close (fd);
if (l->l_type == lt_library && type == ET_EXEC)
l->l_type = lt_executable;
if (l->l_ld == 0)
{
if (type == ET_DYN)
LOSE (0, "object file has no dynamic section");
}
else
(ElfW(Addr)) l->l_ld += l->l_addr;
l->l_entry += l->l_addr;
if (_dl_debug_files)
{
const size_t nibbles = sizeof (void *) * 2;
char buf1[nibbles + 1];
char buf2[nibbles + 1];
char buf3[nibbles + 1];
buf1[nibbles] = '\0';
buf2[nibbles] = '\0';
buf3[nibbles] = '\0';
memset (buf1, '0', nibbles);
memset (buf2, '0', nibbles);
memset (buf3, '0', nibbles);
_itoa_word ((unsigned long int) l->l_ld, &buf1[nibbles], 16, 0);
_itoa_word ((unsigned long int) l->l_addr, &buf2[nibbles], 16, 0);
_itoa_word (maplength, &buf3[nibbles], 16, 0);
_dl_debug_message (1, " dynamic: 0x", buf1, " base: 0x", buf2,
" size: 0x", buf3, "\n", NULL);
memset (buf1, '0', nibbles);
memset (buf2, '0', nibbles);
memset (buf3, ' ', nibbles);
_itoa_word ((unsigned long int) l->l_entry, &buf1[nibbles], 16, 0);
_itoa_word ((unsigned long int) l->l_phdr, &buf2[nibbles], 16, 0);
_itoa_word (l->l_phnum, &buf3[nibbles], 10, 0);
_dl_debug_message (1, " entry: 0x", buf1, " phdr: 0x", buf2,
" phnum: ", buf3, "\n\n", NULL);
}
elf_get_dynamic_info (l);
if (l->l_info[DT_HASH])
_dl_setup_hash (l);
/* If this object has DT_SYMBOLIC set modify now its scope. We don't
have to do this for the main map. */
if (l->l_info[DT_SYMBOLIC] && &l->l_searchlist != l->l_scope[0])
{
/* Create an appropriate searchlist. It contains only this map.
XXX This is the definition of DT_SYMBOLIC in SysVr4. The old
GNU ld.so implementation had a different interpretation which
is more reasonable. We are prepared to add this possibility
back as part of a GNU extension of the ELF format. */
l->l_symbolic_searchlist.r_list =
(struct link_map **) malloc (sizeof (struct link_map *));
if (l->l_symbolic_searchlist.r_list == NULL)
LOSE (ENOMEM, "cannot create searchlist");
l->l_symbolic_searchlist.r_list[0] = l;
l->l_symbolic_searchlist.r_nlist = 1;
l->l_symbolic_searchlist.r_duplist = l->l_symbolic_searchlist.r_list;
l->l_symbolic_searchlist.r_nduplist = 1;
/* Now move the existing entries one back. */
memmove (&l->l_scope[1], &l->l_scope[0],
sizeof (l->l_scope) - sizeof (l->l_scope[0]));
/* Now add the new entry. */
l->l_scope[0] = &l->l_symbolic_searchlist;
}
/* Finally the file information. */
l->l_dev = st.st_dev;
l->l_ino = st.st_ino;
return l;
}
/* Print search path. */
static void
print_search_path (struct r_search_path_elem **list,
const char *what, const char *name)
{
char buf[max_dirnamelen + max_capstrlen];
int first = 1;
_dl_debug_message (1, " search path=", NULL);
while (*list != NULL && (*list)->what == what) /* Yes, ==. */
{
char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
size_t cnt;
for (cnt = 0; cnt < ncapstr; ++cnt)
if ((*list)->status[cnt] != nonexisting)
{
char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
cp[0] = '\0';
else
cp[-1] = '\0';
_dl_debug_message (0, first ? "" : ":", buf, NULL);
first = 0;
}
++list;
}
if (name != NULL)
_dl_debug_message (0, "\t\t(", what, " from file ",
name[0] ? name : _dl_argv[0], ")\n", NULL);
else
_dl_debug_message (0, "\t\t(", what, ")\n", NULL);
}
/* Try to open NAME in one of the directories in DIRS.
Return the fd, or -1. If successful, fill in *REALNAME
with the malloc'd full directory name. */
static int
open_path (const char *name, size_t namelen, int preloaded,
struct r_search_path_elem **dirs,
char **realname)
{
char *buf;
int fd = -1;
const char *current_what = NULL;
if (dirs == NULL || *dirs == NULL)
{
__set_errno (ENOENT);
return -1;
}
buf = alloca (max_dirnamelen + max_capstrlen + namelen);
do
{
struct r_search_path_elem *this_dir = *dirs;
size_t buflen = 0;
size_t cnt;
char *edp;
/* If we are debugging the search for libraries print the path
now if it hasn't happened yet. */
if (_dl_debug_libs && current_what != this_dir->what)
{
current_what = this_dir->what;
print_search_path (dirs, current_what, this_dir->where);
}
edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
{
/* Skip this directory if we know it does not exist. */
if (this_dir->status[cnt] == nonexisting)
continue;
buflen =
((char *) __mempcpy (__mempcpy (edp,
capstr[cnt].str, capstr[cnt].len),
name, namelen)
- buf);
/* Print name we try if this is wanted. */
if (_dl_debug_libs)
_dl_debug_message (1, " trying file=", buf, "\n", NULL);
fd = __open (buf, O_RDONLY);
if (this_dir->status[cnt] == unknown)
{
if (fd != -1)
this_dir->status[cnt] = existing;
else
{
/* We failed to open the machine-dependent library. Let's
test whether there is any directory at all. */
struct stat st;
buf[buflen - namelen - 1] = '\0';
if (__xstat (_STAT_VER, buf, &st) != 0
|| ! S_ISDIR (st.st_mode))
/* The directory does not exist or it is not a directory. */
this_dir->status[cnt] = nonexisting;
else
this_dir->status[cnt] = existing;
}
}
if (fd != -1 && preloaded && __libc_enable_secure)
{
/* This is an extra security effort to make sure nobody can
preload broken shared objects which are in the trusted
directories and so exploit the bugs. */
struct stat st;
if (__fxstat (_STAT_VER, fd, &st) != 0
|| (st.st_mode & S_ISUID) == 0)
{
/* The shared object cannot be tested for being SUID
or this bit is not set. In this case we must not
use this object. */
__close (fd);
fd = -1;
/* We simply ignore the file, signal this by setting
the error value which would have been set by `open'. */
errno = ENOENT;
}
}
}
if (fd != -1)
{
*realname = malloc (buflen);
if (*realname != NULL)
{
memcpy (*realname, buf, buflen);
return fd;
}
else
{
/* No memory for the name, we certainly won't be able
to load and link it. */
__close (fd);
return -1;
}
}
if (errno != ENOENT && errno != EACCES)
/* The file exists and is readable, but something went wrong. */
return -1;
}
while (*++dirs != NULL);
return -1;
}
/* Map in the shared object file NAME. */
struct link_map *
internal_function
_dl_map_object (struct link_map *loader, const char *name, int preloaded,
int type, int trace_mode)
{
int fd;
char *realname;
char *name_copy;
struct link_map *l;
/* Look for this name among those already loaded. */
for (l = _dl_loaded; l; l = l->l_next)
{
/* If the requested name matches the soname of a loaded object,
use that object. Elide this check for names that have not
yet been opened. */
if (l->l_opencount <= 0)
continue;
if (!_dl_name_match_p (name, l))
{
const char *soname;
if (l->l_info[DT_SONAME] == NULL)
continue;
soname = (const void *) (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_SONAME]->d_un.d_val);
if (strcmp (name, soname) != 0)
continue;
/* We have a match on a new name -- cache it. */
add_name_to_object (l, soname);
}
/* We have a match -- bump the reference count and return it. */
++l->l_opencount;
return l;
}
/* Display information if we are debugging. */
if (_dl_debug_files && loader != NULL)
_dl_debug_message (1, "\nfile=", name, "; needed by ",
loader->l_name[0] ? loader->l_name : _dl_argv[0],
"\n", NULL);
if (strchr (name, '/') == NULL)
{
/* Search for NAME in several places. */
size_t namelen = strlen (name) + 1;
if (_dl_debug_libs)
_dl_debug_message (1, "find library=", name, "; searching\n", NULL);
fd = -1;
/* When the object has the RUNPATH information we don't use any
RPATHs. */
if (loader != NULL && loader->l_info[DT_RUNPATH] == NULL)
{
/* First try the DT_RPATH of the dependent object that caused NAME
to be loaded. Then that object's dependent, and on up. */
for (l = loader; fd == -1 && l; l = l->l_loader)
if (l->l_info[DT_RPATH])
{
/* Make sure the cache information is available. */
if (l->l_rpath_dirs == NULL)
{
size_t ptrval = (l->l_info[DT_STRTAB]->d_un.d_ptr
+ l->l_info[DT_RPATH]->d_un.d_val);
l->l_rpath_dirs =
decompose_rpath ((const char *) ptrval, l, "RPATH");
}
if (l->l_rpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, l->l_rpath_dirs,
&realname);
}
/* If dynamically linked, try the DT_RPATH of the executable
itself. */
l = _dl_loaded;
if (fd == -1 && l && l->l_type != lt_loaded && l != loader
&& l->l_rpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, l->l_rpath_dirs,
&realname);
}
/* Try the LD_LIBRARY_PATH environment variable. */
if (fd == -1 && env_path_list != NULL)
fd = open_path (name, namelen, preloaded, env_path_list, &realname);
/* Look at the RUNPATH information for this binary. */
if (loader != NULL && loader->l_info[DT_RUNPATH])
{
/* Make sure the cache information is available. */
if (loader->l_runpath_dirs == NULL)
{
size_t ptrval = (loader->l_info[DT_STRTAB]->d_un.d_ptr
+ loader->l_info[DT_RUNPATH]->d_un.d_val);
loader->l_runpath_dirs =
decompose_rpath ((const char *) ptrval, loader, "RUNPATH");
}
if (loader->l_runpath_dirs != NULL)
fd = open_path (name, namelen, preloaded, loader->l_runpath_dirs,
&realname);
}
if (fd == -1)
{
/* Check the list of libraries in the file /etc/ld.so.cache,
for compatibility with Linux's ldconfig program. */
extern const char *_dl_load_cache_lookup (const char *name);
const char *cached = _dl_load_cache_lookup (name);
if (cached)
{
fd = __open (cached, O_RDONLY);
if (fd != -1)
{
realname = local_strdup (cached);
if (realname == NULL)
{
__close (fd);
fd = -1;
}
}
}
}
/* Finally, try the default path. */
if (fd == -1)
fd = open_path (name, namelen, preloaded, rtld_search_dirs, &realname);
/* Add another newline when we are tracing the library loading. */
if (_dl_debug_libs)
_dl_debug_message (1, "\n", NULL);
}
else
{
/* The path may contain dynamic string tokens. */
realname = (loader
? expand_dynamic_string_token (loader, name)
: local_strdup (name));
if (realname == NULL)
fd = -1;
else
{
fd = __open (realname, O_RDONLY);
if (fd == -1)
free (realname);
}
}
if (fd == -1)
{
if (trace_mode)
{
/* We haven't found an appropriate library. But since we
are only interested in the list of libraries this isn't
so severe. Fake an entry with all the information we
have. */
static const Elf_Symndx dummy_bucket = STN_UNDEF;
/* Enter the new object in the list of loaded objects. */
if ((name_copy = local_strdup (name)) == NULL
|| (l = _dl_new_object (name_copy, name, type, loader)) == NULL)
_dl_signal_error (ENOMEM, name,
"cannot create shared object descriptor");
/* We use an opencount of 0 as a sign for the faked entry.
Since the descriptor is initialized with zero we do not
have do this here.
l->l_opencount = 0;
l->l_reserved = 0; */
l->l_buckets = &dummy_bucket;
l->l_nbuckets = 1;
l->l_relocated = 1;
return l;
}
else
_dl_signal_error (errno, name, "cannot open shared object file");
}
return _dl_map_object_from_fd (name, fd, realname, loader, type);
}
|
169,254,254,633,061 | @node Searching and Sorting, Pattern Matching, Message Translation, Top
@c %MENU% General searching and sorting functions
@chapter Searching and Sorting
This chapter describes functions for searching and sorting arrays of
arbitrary objects. You pass the appropriate comparison function to be
applied as an argument, along with the size of the objects in the array
and the total number of elements.
@menu
* Comparison Functions:: Defining how to compare two objects.
Since the sort and search facilities
are general, you have to specify the
ordering.
* Array Search Function:: The @code{bsearch} function.
* Array Sort Function:: The @code{qsort} function.
* Search/Sort Example:: An example program.
* Hash Search Function:: The @code{hsearch} function.
* Tree Search Function:: The @code{tsearch} function.
@end menu
@node Comparison Functions
@section Defining the Comparison Function
@cindex Comparison Function
In order to use the sorted array library functions, you have to describe
how to compare the elements of the array.
To do this, you supply a comparison function to compare two elements of
the array. The library will call this function, passing as arguments
pointers to two array elements to be compared. Your comparison function
should return a value the way @code{strcmp} (@pxref{String/Array
Comparison}) does: negative if the first argument is ``less'' than the
second, zero if they are ``equal'', and positive if the first argument
is ``greater''.
Here is an example of a comparison function which works with an array of
numbers of type @code{double}:
@smallexample
int
compare_doubles (const void *a, const void *b)
@{
const double *da = (const double *) a;
const double *db = (const double *) b;
return (*da > *db) - (*da < *db);
@}
@end smallexample
The header file @file{stdlib.h} defines a name for the data type of
comparison functions. This type is a GNU extension.
@comment stdlib.h
@comment GNU
@tindex comparison_fn_t
@smallexample
int comparison_fn_t (const void *, const void *);
@end smallexample
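As an illustration (a minimal sketch using the @code{compare_doubles}
function defined above), a variable holding a pointer to a comparison
function can be declared using this type:
@smallexample
comparison_fn_t *compare = compare_doubles;
@end smallexample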
@node Array Search Function
@section Array Search Function
@cindex search function (for arrays)
@cindex binary search function (for arrays)
@cindex array search function
Generally searching for a specific element in an array means that
potentially all elements must be checked. The GNU C library contains
functions to perform a linear search. The prototypes for the following
two functions can be found in @file{search.h}.
@comment search.h
@comment SVID
@deftypefun {void *} lfind (const void *@var{key}, void *@var{base}, size_t *@var{nmemb}, size_t @var{size}, comparison_fn_t @var{compar})
The @code{lfind} function searches in the array with @code{*@var{nmemb}}
elements of @var{size} bytes pointed to by @var{base} for an element
which matches the one pointed to by @var{key}. The function pointed to
by @var{compar} is used to decide whether two elements match.
The return value is a pointer to the matching element in the array
starting at @var{base} if it is found. If no matching element is
available @code{NULL} is returned.
On average, this function performs @code{*@var{nmemb}}/2 comparisons.
This function should only be used if elements often get added to or
deleted from the array, in which case it might not be worthwhile to
sort the array before searching.
@end deftypefun
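Here is a minimal sketch of using @code{lfind} to search an unsorted
array of @code{int} values; the array contents and the
@code{compare_ints} helper are made up for the example:
@smallexample
#include <search.h>
#include <stdio.h>

static int
compare_ints (const void *a, const void *b)
@{
  const int *ia = (const int *) a;
  const int *ib = (const int *) b;
  return (*ia > *ib) - (*ia < *ib);
@}

int
main (void)
@{
  int values[] = @{ 12, 5, 8, 20 @};
  size_t nmemb = 4;
  int key = 8;
  int *found = lfind (&key, values, &nmemb, sizeof (int), compare_ints);
  if (found != NULL)
    printf ("found %d\n", *found);
  return 0;
@}
@end smallexample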
@comment search.h
@comment SVID
@deftypefun {void *} lsearch (const void *@var{key}, void *@var{base}, size_t *@var{nmemb}, size_t @var{size}, comparison_fn_t @var{compar})
The @code{lsearch} function is similar to the @code{lfind} function. It
searches the given array for an element and returns it if found. The
difference is that if no matching element is found the @code{lsearch}
function adds the object pointed to by @var{key} (with a size of
@var{size} bytes) at the end of the array and it increments the value of
@code{*@var{nmemb}} to reflect this addition.
This means that if the caller is not sure the array contains the
element being searched for, the memory allocated for the array
starting at @var{base} must have room for at least @var{size} more
bytes. If one is sure the element is in the array it is better to use
@code{lfind}; the extra room in the array is only necessary when
calling @code{lsearch}.
@end deftypefun
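The following sketch (with made-up values, reusing the
@code{compare_ints} function from the @code{lfind} sketch above) shows
how @code{lsearch} adds a key which is not yet in the array; note that
the array has room for one more element than it currently contains:
@smallexample
@{
  int values[5] = @{ 12, 5, 8, 20 @};   /* room for one more element */
  size_t nmemb = 4;
  int key = 42;

  /* Not found, so the key is appended and nmemb becomes 5. */
  (void) lsearch (&key, values, &nmemb, sizeof (int), compare_ints);
@}
@end smallexample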
To search a sorted array for an element matching the key, use the
@code{bsearch} function. The prototype for this function is in
the header file @file{stdlib.h}.
@pindex stdlib.h
@comment stdlib.h
@comment ISO
@deftypefun {void *} bsearch (const void *@var{key}, const void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
The @code{bsearch} function searches the sorted array @var{array} for an object
that is equivalent to @var{key}. The array contains @var{count} elements,
each of which is of size @var{size} bytes.
The @var{compare} function is used to perform the comparison. This
function is called with two pointer arguments and should return an
integer less than, equal to, or greater than zero corresponding to
whether its first argument is considered less than, equal to, or greater
than its second argument. The elements of the @var{array} must already
be sorted in ascending order according to this comparison function.
The return value is a pointer to the matching array element, or a null
pointer if no match is found. If the array contains more than one element
that matches, the one that is returned is unspecified.
This function derives its name from the fact that it is implemented
using the binary search algorithm.
@end deftypefun
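A minimal sketch of a @code{bsearch} call, reusing the
@code{compare_doubles} function from above on an array which is
assumed to be already sorted:
@smallexample
@{
  double sorted[] = @{ 1.5, 2.5, 8.0, 9.25 @};
  double key = 8.0;
  double *found;

  found = bsearch (&key, sorted, 4, sizeof (double), compare_doubles);
  /* found now points to sorted[2]; it would be NULL for a missing key. */
@}
@end smallexample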
@node Array Sort Function
@section Array Sort Function
@cindex sort function (for arrays)
@cindex quick sort function (for arrays)
@cindex array sort function
To sort an array using an arbitrary comparison function, use the
@code{qsort} function. The prototype for this function is in
@file{stdlib.h}.
@pindex stdlib.h
@comment stdlib.h
@comment ISO
@deftypefun void qsort (void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
The @code{qsort} function sorts the array @var{array}. The array
contains @var{count} elements, each of which is of size @var{size} bytes.
The @var{compare} function is used to perform the comparison on the
array elements. This function is called with two pointer arguments and
should return an integer less than, equal to, or greater than zero
corresponding to whether its first argument is considered less than,
equal to, or greater than its second argument.
@cindex stable sorting
@strong{Warning:} If two objects compare as equal, their order after
sorting is unpredictable. That is to say, the sorting is not stable.
This can make a difference when the comparison considers only part of
the elements. Two elements with the same sort key may differ in other
respects.
If you want the effect of a stable sort, you can get this result by
writing the comparison function so that, lacking any other reason to
distinguish between two elements, it compares them by their addresses.
Note that doing this may make the sorting algorithm less efficient, so
do it only if necessary.
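For instance, a version of @code{compare_doubles} which falls back to
comparing the addresses of equal elements could look like this (a
sketch for illustration, not part of the library):
@smallexample
int
compare_doubles_stable (const void *a, const void *b)
@{
  const double *da = (const double *) a;
  const double *db = (const double *) b;
  if (*da != *db)
    return (*da > *db) - (*da < *db);
  /* Equal values: fall back to the element addresses. */
  return (da > db) - (da < db);
@}
@end smallexample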
Here is a simple example of sorting an array of doubles in numerical
order, using the comparison function defined above (@pxref{Comparison
Functions}):
@smallexample
@{
double *array;
int size;
@dots{}
qsort (array, size, sizeof (double), compare_doubles);
@}
@end smallexample
The @code{qsort} function derives its name from the fact that it was
originally implemented using the ``quick sort'' algorithm.
@end deftypefun
@node Search/Sort Example
@section Searching and Sorting Example
Here is an example showing the use of @code{qsort} and @code{bsearch}
with an array of structures. The objects in the array are sorted
by comparing their @code{name} fields with the @code{strcmp} function.
Then, we can look up individual objects based on their names.
@comment This example is dedicated to the memory of Jim Henson. RIP.
@smallexample
@include search.c.texi
@end smallexample
@cindex Kermit the frog
The output from this program looks like:
@smallexample
Kermit, the frog
Piggy, the pig
Gonzo, the whatever
Fozzie, the bear
Sam, the eagle
Robin, the frog
Animal, the animal
Camilla, the chicken
Sweetums, the monster
Dr. Strangepork, the pig
Link Hogthrob, the pig
Zoot, the human
Dr. Bunsen Honeydew, the human
Beaker, the human
Swedish Chef, the human
Animal, the animal
Beaker, the human
Camilla, the chicken
Dr. Bunsen Honeydew, the human
Dr. Strangepork, the pig
Fozzie, the bear
Gonzo, the whatever
Kermit, the frog
Link Hogthrob, the pig
Piggy, the pig
Robin, the frog
Sam, the eagle
Swedish Chef, the human
Sweetums, the monster
Zoot, the human
Kermit, the frog
Gonzo, the whatever
Couldn't find Janice.
@end smallexample
@node Hash Search Function
@section The @code{hsearch} function.
The functions mentioned so far in this chapter search in a sorted or
unsorted array. There are other ways to organize information which is
later to be searched, with differing costs for insertion, deletion and
search. One possible implementation uses hashing tables.
@comment search.h
@comment SVID
@deftypefun int hcreate (size_t @var{nel})
The @code{hcreate} function creates a hashing table which can contain at
least @var{nel} elements. There is no way to grow this table, so it is
necessary to choose the value for @var{nel} wisely. The methods used to
implement this function might require the number of elements in the
hashing table to be larger than the expected maximal number of
elements. Hashing tables usually work inefficiently if they are filled
80% or more. The constant access time guaranteed by hashing can only be
achieved if few collisions exist. See Knuth's ``The Art of Computer
Programming, Part 3: Searching and Sorting'' for more information.
The weakest aspect of this function is that there can be at most one
hashing table in use in the whole program. The table is allocated in
local memory outside the control of the programmer. As an extension the
GNU C library provides an additional set of functions with a reentrant
interface which allows keeping arbitrarily many hashing tables.
It is possible to use more than one hashing table in the program run if
the former table is first destroyed by a call to @code{hdestroy}.
The function returns a non-zero value if successful. If it returns
zero, something went wrong. This could either mean there is already a
hashing table in use or the program ran out of memory.
@end deftypefun
@comment search.h
@comment SVID
@deftypefun void hdestroy (void)
The @code{hdestroy} function can be used to free all the resources
allocated in a previous call of @code{hcreate}. After a call to this
function it is again possible to call @code{hcreate} and allocate a new
table of a possibly different size.
It is important to remember that the elements contained in the hashing
table at the time @code{hdestroy} is called are @emph{not} freed by this
function. It is the responsibility of the program code to free those
strings (if necessary at all). Freeing all the element memory is not
possible without extra, separately kept information since there is no
function to iterate through all available elements in the hashing table.
If it is really necessary to free a table and all elements the
programmer has to keep a list of all table elements, and before calling
@code{hdestroy} free each element's data using this list. This is a
very unpleasant mechanism, and it also shows that this kind of hashing
table is mainly meant for tables which are created once and used until
the end of the program run.
@end deftypefun
Entries of the hashing table and keys for the search are defined using
this type:
@deftp {Data type} {struct ENTRY}
Both elements of this structure are pointers to zero-terminated strings.
This is a limitation of the @code{hsearch} functions: they can only be
used for data sets which use the NUL character always and solely to
terminate the records. It is not possible to handle general binary
data.
@table @code
@item char *key
Pointer to a zero-terminated string of characters describing the key for
the search or the element in the hashing table.
@item char *data
Pointer to a zero-terminated string of characters describing the data.
If the functions will be called only for searching an existing entry
this element might stay undefined since it is not used.
@end table
@end deftp
@comment search.h
@comment SVID
@deftypefun {ENTRY *} hsearch (ENTRY @var{item}, ACTION @var{action})
To search in a hashing table created using @code{hcreate} the
@code{hsearch} function must be used. This function can perform a
simple search for an element (if @var{action} has the value
@code{FIND}) or it can alternatively insert the key element into the
hashing table, possibly replacing a previous value (if @var{action} is
@code{ENTER}).
The key is given by the @var{item} argument, an object of type
@code{ENTRY}. For locating the corresponding position in the hashing
table only the @code{key} element of the structure is used.
The return value depends on the @var{action} parameter value. If it is
@code{FIND} the value is a pointer to the matching element in the
hashing table or @code{NULL} if no matching element exists. If
@var{action} is @code{ENTER} the return value is @code{NULL} only if
the program runs out of memory while adding the new element to the
table. Otherwise the return value is a pointer to the element in the
hashing table which contains the newly added element based on the data
in @var{item}.
@end deftypefun
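Here is a minimal sketch of using @code{hcreate} and @code{hsearch};
the table size and the key/data strings are made up for the example:
@smallexample
#include <search.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
@{
  ENTRY item, *found;
  if (hcreate (20) == 0)
    abort ();           /* could not create the table */
  item.key = "red";
  item.data = "#ff0000";
  if (hsearch (item, ENTER) == NULL)
    abort ();           /* adding the element failed */
  item.key = "red";
  found = hsearch (item, FIND);
  if (found != NULL)
    printf ("%s -> %s\n", found->key, (char *) found->data);
  hdestroy ();
  return 0;
@}
@end smallexample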
As mentioned before, the hashing table used by the functions described
so far is global, and at most one hashing table can be in use at any
time in the program. A solution is to use the following functions,
which are a GNU extension. They all operate on a hashing table which is
described by the content of an object of the type @code{struct
hsearch_data}. This type should be treated as opaque; none of its
members should be changed directly.
@comment search.h
@comment GNU
@deftypefun int hcreate_r (size_t @var{nel}, struct hsearch_data *@var{htab})
The @code{hcreate_r} function initializes the object pointed to by
@var{htab} to contain a hashing table with at least @var{nel} elements.
So this function is equivalent to the @code{hcreate} function except
that the initialized data structure is controlled by the user.
This makes it possible to have more than one hashing table at one time.
memory necessary for the @code{struct hsearch_data} object can be
allocated dynamically.
The return value is non-zero if the operation was successful. If the
return value is zero, something went wrong, which probably means the
program ran out of memory.
@end deftypefun
@comment search.h
@comment GNU
@deftypefun void hdestroy_r (struct hsearch_data *@var{htab})
The @code{hdestroy_r} function frees all resources allocated by the
@code{hcreate_r} function for this very same object @var{htab}. As for
@code{hdestroy} it is the program's responsibility to free the strings
for the elements of the table.
@end deftypefun
@comment search.h
@comment GNU
@deftypefun int hsearch_r (ENTRY @var{item}, ACTION @var{action}, ENTRY **@var{retval}, struct hsearch_data *@var{htab})
The @code{hsearch_r} function is equivalent to @code{hsearch}. The
meaning of the first two arguments is identical. But instead of
operating on a single global hashing table the function works on the
table described by the object pointed to by @var{htab} (which is
initialized by a call to @code{hcreate_r}).
Another difference from @code{hsearch} is that the pointer to the found
entry in the table is not the return value of the function. It is
returned by storing it in a pointer variable pointed to by the
@var{retval} parameter. The return value of the function is an integer
value indicating success if it is non-zero and failure if it is zero.
In the latter case the global variable @code{errno} signals the reason
for the failure.
@table @code
@item ENOMEM
The table is full and @code{hsearch_r} was called with a so far
unknown key and @var{action} set to @code{ENTER}.
@item ESRCH
The @var{action} parameter is @code{FIND} and no corresponding element
is found in the table.
@end table
@end deftypefun
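A minimal sketch of the reentrant interface follows; with the GNU C
library the @code{struct hsearch_data} object must be cleared before it
is first passed to @code{hcreate_r}:
@smallexample
#define _GNU_SOURCE
#include <search.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
@{
  struct hsearch_data htab;
  ENTRY item, *retval;
  memset (&htab, '\0', sizeof (htab));   /* must be zeroed first */
  if (hcreate_r (20, &htab) == 0)
    abort ();
  item.key = "red";
  item.data = "#ff0000";
  if (hsearch_r (item, ENTER, &retval, &htab) == 0)
    abort ();
  hdestroy_r (&htab);
  return 0;
@}
@end smallexample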
@node Tree Search Function
@section The @code{tsearch} function.
Another common way to organize data for efficient searching is to use
trees. The @code{tsearch} function family provides a nice interface for
organizing possibly large amounts of data with a mean access time
proportional to the logarithm of the number of elements. The GNU C
library implementation even guarantees that this bound is never
exceeded, even for input data which causes problems for simple binary
tree implementations.
The functions described in this section are all specified in the
@w{System V} and X/Open specifications and are therefore quite portable.
In contrast to the @code{hsearch} functions the @code{tsearch} functions
can be used with arbitrary data and not only zero-terminated strings.
The @code{tsearch} functions have the advantage that no function to
initialize data structures is necessary. A simple pointer of type
@code{void *} initialized to @code{NULL} is a valid tree and can be
extended or searched.
@comment search.h
@comment SVID
@deftypefun {void *} tsearch (const void *@var{key}, void **@var{rootp}, comparison_fn_t @var{compar})
The @code{tsearch} function searches in the tree pointed to by
@code{*@var{rootp}} for an element matching @var{key}. The function
pointed to by @var{compar} is used to determine whether two elements
match. @xref{Comparison Functions}, for a specification of the functions
which can be used for the @var{compar} parameter.
If the tree does not contain a matching entry the @var{key} value will
be added to the tree. @code{tsearch} does not make a copy of the object
pointed to by @var{key} (how could it since the size is unknown).
Instead it adds a reference to this object which means the object must
be available as long as the tree data structure is used.
The tree is represented by a pointer to a pointer since it is sometimes
necessary to change the root node of the tree. So it must not be
assumed that the variable pointed to by @var{rootp} has the same value
after the call. This also shows that it is not safe to call the
@code{tsearch} function more than once at the same time using the same
tree. It is no problem to run it more than once at a time on different
trees.
The return value is a pointer to the matching element in the tree. If a
new element was created the pointer points to the new data (which is in
fact @var{key}). If an entry had to be created and the program ran out
of space @code{NULL} is returned.
@end deftypefun
@comment search.h
@comment SVID
@deftypefun {void *} tfind (const void *@var{key}, void *const *@var{rootp}, comparison_fn_t @var{compar})
The @code{tfind} function is similar to the @code{tsearch} function. It
locates an element matching the one pointed to by @var{key} and returns
a pointer to this element. But if no matching element is available no
new element is entered (note that the @var{rootp} parameter points to a
constant pointer). Instead the function returns @code{NULL}.
@end deftypefun
Another advantage of the @code{tsearch} functions over the
@code{hsearch} functions is that there is an easy way to remove
elements.
@comment search.h
@comment SVID
@deftypefun {void *} tdelete (const void *@var{key}, void **@var{rootp}, comparison_fn_t @var{compar})
To remove a specific element matching @var{key} from the tree
@code{tdelete} can be used. It locates the matching element using the
same method as @code{tfind}. The corresponding element is then removed
and the data of this tree node is returned by the function. If there is
no matching entry in the tree nothing can be deleted and the function
returns @code{NULL}.
@end deftypefun
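The following sketch (with made-up keys, reusing the
@code{compare_ints} function from the @code{lfind} sketch above) shows
these functions working together on one tree. Note that the comparison
function receives the pointers given as @var{key}, here pointers to
@code{int}:
@smallexample
@{
  void *root = NULL;
  int keys[] = @{ 3, 1, 2 @};
  int i;

  for (i = 0; i < 3; ++i)
    tsearch (&keys[i], &root, compare_ints);     /* insert */
  if (tfind (&keys[1], &root, compare_ints) != NULL)
    tdelete (&keys[1], &root, compare_ints);     /* remove key 1 */
@}
@end smallexample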
@comment search.h
@comment GNU
@deftypefun void tdestroy (void *@var{vroot}, __free_fn_t @var{freefct})
If the complete search tree has to be removed one can use
@code{tdestroy}. It frees all resources allocated by the @code{tsearch}
function to generate the tree pointed to by @var{vroot}.
For the data in each tree node the function @var{freefct} is called.
The pointer to the data is passed as the argument to the function. If
no such work is necessary @var{freefct} must point to a function doing
nothing. It is called in any case.
This function is a GNU extension and not covered by the @w{System V} or
X/Open specifications.
@end deftypefun
In addition to the functions to create and destroy the tree data
structure, there is another function which makes it possible to apply a
function to all elements of the tree. The function must have this type:
@smallexample
void __action_fn_t (const void *nodep, VISIT value, int level);
@end smallexample
The @var{nodep} argument is a pointer to the current tree node;
dereferencing it as a pointer to a pointer yields the data value of the
node (once given as the @var{key} argument to @code{tsearch}).
@var{level} is a numeric value which corresponds to the depth of the
current node in the tree. The root node has the depth @math{0} and its
children have a depth of @math{1} and so on. The @code{VISIT} type is
an enumeration type.
@deftp {Data Type} VISIT
The @code{VISIT} value indicates the status of the current node in the
tree and how the function is called. The status of a node is either
`leaf' or `internal node'. For each leaf node the function is called
exactly once, for each internal node it is called three times: before
the first child is processed, after the first child is processed and
after both children are processed. This makes it possible to handle all
three methods of tree traversal (or even a combination of them).
@table @code
@item preorder
The current node is an internal node and the function is called before
the first child is processed.
@item postorder
The current node is an internal node and the function is called after
the first child is processed but before the second.
@item endorder
The current node is an internal node and the function is called after
the second child is processed.
@item leaf
The current node is a leaf.
@end table
@end deftp
@comment search.h
@comment SVID
@deftypefun void twalk (const void *@var{root}, __action_fn_t @var{action})
For each node in the tree whose root is pointed to by @var{root} the
@code{twalk} function calls the function provided by the parameter
@var{action}. For leaf nodes the function is called exactly once with
@var{value} set to @code{leaf}. For internal nodes the function is
called three times, setting the @var{value} parameter of @var{action}
to the appropriate value. The @var{level} argument for the
@var{action} function is computed while descending the tree, increasing
the value by one for each descent to a child, starting with the value
@math{0} for the root node.
Since the functions used for the @var{action} parameter to @code{twalk}
must not modify the tree data, it is safe to run @code{twalk} in more
than one thread at the same time, working on the same tree. It is also
safe to call @code{tfind} in parallel. Functions which modify the tree
must not be used; otherwise the behavior is undefined.
@end deftypefun
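As an illustration, here is a sketch of an @var{action} function which
prints the elements of a tree of @code{int} values in sorted order; the
@code{print_node} name is made up for the example, and note the double
indirection needed to reach the data value:
@smallexample
#include <search.h>
#include <stdio.h>

void
print_node (const void *nodep, VISIT value, int level)
@{
  /* The in-order position of an internal node is `postorder';
     leaves are visited exactly once. */
  if (value == postorder || value == leaf)
    printf ("%d\n", **(int *const *) nodep);
@}

/* Usage: twalk (root, print_node); */
@end smallexample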
| @node Searching and Sorting, Pattern Matching, Message Translation, Top
@c %MENU% General searching and sorting functions
@chapter Searching and Sorting
This chapter describes functions for searching and sorting arrays of
arbitrary objects. You pass the appropriate comparison function to be
applied as an argument, along with the size of the objects in the array
and the total number of elements.
@menu
* Comparison Functions:: Defining how to compare two objects.
Since the sort and search facilities
are general, you have to specify the
ordering.
* Array Search Function:: The @code{bsearch} function.
* Array Sort Function:: The @code{qsort} function.
* Search/Sort Example:: An example program.
* Hash Search Function:: The @code{hsearch} function.
* Tree Search Function:: The @code{tsearch} function.
@end menu
@node Comparison Functions
@section Defining the Comparison Function
@cindex Comparison Function
In order to use the sorted array library functions, you have to describe
how to compare the elements of the array.
To do this, you supply a comparison function to compare two elements of
the array. The library will call this function, passing as arguments
pointers to two array elements to be compared. Your comparison function
should return a value the way @code{strcmp} (@pxref{String/Array
Comparison}) does: negative if the first argument is ``less'' than the
second, zero if they are ``equal'', and positive if the first argument
is ``greater''.
Here is an example of a comparison function which works with an array of
numbers of type @code{double}:
@smallexample
int
compare_doubles (const void *a, const void *b)
@{
const double *da = (const double *) a;
const double *db = (const double *) b;
return (*da > *db) - (*da < *db);
@}
@end smallexample
The header file @file{stdlib.h} defines a name for the data type of
comparison functions. This type is a GNU extension.
@comment stdlib.h
@comment GNU
@tindex comparison_fn_t
@smallexample
int comparison_fn_t (const void *, const void *);
@end smallexample
@node Array Search Function
@section Array Search Function
@cindex search function (for arrays)
@cindex binary search function (for arrays)
@cindex array search function
Generally searching for a specific element in an array means that
potentially all elements must be checked. The GNU C library contains
functions to perform linear search. The prototypes for the following
two functions can be found in @file{search.h}.
@comment search.h
@comment SVID
@deftypefun {void *} lfind (const void *@var{key}, void *@var{base}, size_t *@var{nmemb}, size_t @var{size}, comparison_fn_t @var{compar})
The @code{lfind} function searches in the array with @code{*@var{nmemb}}
elements of @var{size} bytes pointed to by @var{base} for an element
which matches the one pointed to by @var{key}. The function pointed to
by @var{compar} is used decide whether two elements match.
The return value is a pointer to the matching element in the array
starting at @var{base} if it is found. If no matching element is
available @code{NULL} is returned.
The mean runtime of this function is @code{*@var{nmemb}}/2. This
function should only be used elements often get added to or deleted from
the array in which case it might not be useful to sort the array before
searching.
@end deftypefun
@comment search.h
@comment SVID
@deftypefun {void *} lsearch (const void *@var{key}, void *@var{base}, size_t *@var{nmemb}, size_t @var{size}, comparison_fn_t @var{compar})
The @code{lsearch} function is similar to the @code{lfind} function. It
searches the given array for an element and returns it if found. The
difference is that if no matching element is found the @code{lsearch}
function adds the object pointed to by @var{key} (with a size of
@var{size} bytes) at the end of the array and it increments the value of
@code{*@var{nmemb}} to reflect this addition.
This means for the caller that if it is not sure that the array contains
the element one is searching for the memory allocated for the array
starting at @var{base} must have room for at least @var{size} more
bytes. If one is sure the element is in the array it is better to use
@code{lfind} so having more room in the array is always necessary when
calling @code{lsearch}.
@end deftypefun
To search a sorted array for an element matching the key, use the
@code{bsearch} function. The prototype for this function is in
the header file @file{stdlib.h}.
@pindex stdlib.h
@comment stdlib.h
@comment ISO
@deftypefun {void *} bsearch (const void *@var{key}, const void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
The @code{bsearch} function searches the sorted array @var{array} for an object
that is equivalent to @var{key}. The array contains @var{count} elements,
each of which is of size @var{size} bytes.
The @var{compare} function is used to perform the comparison. This
function is called with two pointer arguments and should return an
integer less than, equal to, or greater than zero corresponding to
whether its first argument is considered less than, equal to, or greater
than its second argument. The elements of the @var{array} must already
be sorted in ascending order according to this comparison function.
The return value is a pointer to the matching array element, or a null
pointer if no match is found. If the array contains more than one element
that matches, the one that is returned is unspecified.
This function derives its name from the fact that it is implemented
using the binary search algorithm.
@end deftypefun
@node Array Sort Function
@section Array Sort Function
@cindex sort function (for arrays)
@cindex quick sort function (for arrays)
@cindex array sort function
To sort an array using an arbitrary comparison function, use the
@code{qsort} function. The prototype for this function is in
@file{stdlib.h}.
@pindex stdlib.h
@comment stdlib.h
@comment ISO
@deftypefun void qsort (void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
The @var{qsort} function sorts the array @var{array}. The array contains
@var{count} elements, each of which is of size @var{size}.
The @var{compare} function is used to perform the comparison on the
array elements. This function is called with two pointer arguments and
should return an integer less than, equal to, or greater than zero
corresponding to whether its first argument is considered less than,
equal to, or greater than its second argument.
@cindex stable sorting
@strong{Warning:} If two objects compare as equal, their order after
sorting is unpredictable. That is to say, the sorting is not stable.
This can make a difference when the comparison considers only part of
the elements. Two elements with the same sort key may differ in other
respects.
If you want the effect of a stable sort, you can get this result by
writing the comparison function so that, lacking other reason
distinguish between two elements, it compares them by their addresses.
Note that doing this may make the sorting algorithm less efficient, so
do it only if necessary.
Here is a simple example of sorting an array of doubles in numerical
order, using the comparison function defined above (@pxref{Comparison
Functions}):
@smallexample
@{
double *array;
int size;
@dots{}
qsort (array, size, sizeof (double), compare_doubles);
@}
@end smallexample
The @code{qsort} function derives its name from the fact that it was
originally implemented using the ``quick sort'' algorithm.
@end deftypefun
@node Search/Sort Example
@section Searching and Sorting Example
Here is an example showing the use of @code{qsort} and @code{bsearch}
with an array of structures. The objects in the array are sorted
by comparing their @code{name} fields with the @code{strcmp} function.
Then, we can look up individual objects based on their names.
@comment This example is dedicated to the memory of Jim Henson. RIP.
@smallexample
@include search.c.texi
@end smallexample
@cindex Kermit the frog
The output from this program looks like:
@smallexample
Kermit, the frog
Piggy, the pig
Gonzo, the whatever
Fozzie, the bear
Sam, the eagle
Robin, the frog
Animal, the animal
Camilla, the chicken
Sweetums, the monster
Dr. Strangepork, the pig
Link Hogthrob, the pig
Zoot, the human
Dr. Bunsen Honeydew, the human
Beaker, the human
Swedish Chef, the human
Animal, the animal
Beaker, the human
Camilla, the chicken
Dr. Bunsen Honeydew, the human
Dr. Strangepork, the pig
Fozzie, the bear
Gonzo, the whatever
Kermit, the frog
Link Hogthrob, the pig
Piggy, the pig
Robin, the frog
Sam, the eagle
Swedish Chef, the human
Sweetums, the monster
Zoot, the human
Kermit, the frog
Gonzo, the whatever
Couldn't find Janice.
@end smallexample
@node Hash Search Function
@section The @code{hsearch} function.
The functions mentioned so far in this chapter are searching in a sorted
or unsorted array. There are other methods to organize information
which later should be searched. The costs of insert, delete and search
differ. One possible implementation is using hashing tables.
@comment search.h
@comment SVID
@deftypefun int hcreate (size_t @var{nel})
The @code{hcreate} function creates a hashing table which can contain at
least @var{nel} elements. There is no possibility to grow this table so
it is necessary to choose the value for @var{nel} wisely. The used
methods to implement this function might make it necessary to make the
number of elements in the hashing table larger than the expected maximal
number of elements. Hashing tables usually work inefficient if they are
filled 80% or more. The constant access time guaranteed by hashing can
only be achieved if few collisions exist. See Knuth's ``The Art of
Computer Programming, Part 3: Searching and Sorting'' for more
information.
The weakest aspect of this function is that there can be at most one
hashing table used through the whole program. The table is allocated
in local memory out of control of the programmer. As an extension the
GNU C library provides an additional set of functions with an reentrant
interface which provide a similar interface but which allow to keep
arbitrary many hashing tables.
It is possible to use more than one hashing table in the program run if
the former table is first destroyed by a call to @code{hdestroy}.
The function returns a non-zero value if successful. If it return zero
something went wrong. This could either mean there is already a hashing
table in use or the program runs out of memory.
@end deftypefun
@comment search.h
@comment SVID
@deftypefun void hdestroy (void)
The @code{hdestroy} function can be used to free all the resources
allocated in a previous call of @code{hcreate}. After a call to this
function it is again possible to call @code{hcreate} and allocate a new
table with possibly different size.
It is important to remember that the elements contained in the hashing
table at the time @code{hdestroy} is called are @emph{not} freed by this
function. It is the responsibility of the program code to free those
strings (if necessary at all). Freeing all the element memory is not
possible without extra, separately kept information since there is no
function to iterate through all available elements in the hashing table.
If it is really necessary to free a table and all elements the
programmer has to keep a list of all table elements and before calling
@code{hdestroy} s/he has to free all element's data using this list.
This is a very unpleasant mechanism and it also shows that this kind of
hashing tables is mainly meant for tables which are created once and
used until the end of the program run.
@end deftypefun
Entries of the hashing table and keys for the search are defined using
this type:
@deftp {Data type} {struct ENTRY}
Both elements of this structure are pointers to zero-terminated strings.
This is a limiting restriction of the functionality of the
@code{hsearch} functions. They can only be used for data sets which use
the NUL character always and solely to terminate the records. It is not
possible to handle general binary data.
@table @code
@item char *key
Pointer to a zero-terminated string of characters describing the key for
the search or the element in the hashing table.
@item char *data
Pointer to a zero-terminated string of characters describing the data.
If the functions will be called only for searching an existing entry
this element might stay undefined since it is not used.
@end table
@end deftp
@comment search.h
@comment SVID
@deftypefun {ENTRY *} hsearch (ENTRY @var{item}, ACTION @var{action})
To search in a hashing table created using @code{hcreate} the
@code{hsearch} function must be used. This function can perform simple
search for an element (if @var{action} has the @code{FIND}) or it can
alternatively insert the key element into the hashing table, possibly
replacing a previous value (if @var{action} is @code{ENTER}).
The key is denoted by a pointer to an object of type @code{ENTRY}. For
locating the corresponding position in the hashing table only the
@code{key} element of the structure is used.
The return value depends on the @var{action} parameter value. If it is
@code{FIND} the value is a pointer to the matching element in the
hashing table or @code{NULL} if no matching element exists. If
@var{action} is @code{ENTER} the return value is only @code{NULL} if the
programs runs out of memory while adding the new element to the table.
Otherwise the return value is a pointer to the element in the hashing
table which contains newly added element based on the data in @var{key}.
@end deftypefun
As mentioned before the hashing table used by the functions described so
far is global and there can be at any time at most one hashing table in
the program. A solution is to use the following functions which are a
GNU extension. All have in common that they operate on a hashing table
which is described by the content of an object of the type @code{struct
hsearch_data}. This type should be treated as opaque, none of its
members should be changed directly.
@comment search.h
@comment GNU
@deftypefun int hcreate_r (size_t @var{nel}, struct hsearch_data *@var{htab})
The @code{hcreate_r} function initializes the object pointed to by
@var{htab} to contain a hashing table with at least @var{nel} elements.
This function is therefore equivalent to the @code{hcreate} function except
that the initialized data structure is controlled by the user.
This allows having more than one hashing table at a time.  The
memory necessary for the @code{struct hsearch_data} object can be
allocated dynamically.
The return value is non-zero if the operation was successful.  If the
return value is zero something went wrong, which probably means the
program ran out of memory.
@end deftypefun
@comment search.h
@comment GNU
@deftypefun void hdestroy_r (struct hsearch_data *@var{htab})
The @code{hdestroy_r} function frees all resources allocated by the
@code{hcreate_r} function for this same object @var{htab}.  As with
@code{hdestroy} it is the program's responsibility to free the strings
for the elements of the table.
@end deftypefun
@comment search.h
@comment GNU
@deftypefun int hsearch_r (ENTRY @var{item}, ACTION @var{action}, ENTRY **@var{retval}, struct hsearch_data *@var{htab})
The @code{hsearch_r} function is equivalent to @code{hsearch}.  The
meaning of the first two arguments is identical.  But instead of
operating on a single global hashing table the function works on the
table described by the object pointed to by @var{htab} (which is
initialized by a call to @code{hcreate_r}).
Another difference from @code{hsearch} is that the pointer to the found
entry in the table is not the return value of the function.  It is
returned by storing it in the pointer variable pointed to by the
@var{retval} parameter.  The return value of the function is an integer
value indicating success if it is non-zero and failure if it is zero.
In the latter case the global variable @code{errno} signals the reason for
the failure.
@table @code
@item ENOMEM
The table is full and @code{hsearch_r} was called with a so far
unknown key and @var{action} set to @code{ENTER}.
@item ESRCH
The @var{action} parameter is @code{FIND} and no corresponding element
is found in the table.
@end table
@end deftypefun
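The reentrant variants can be sketched as follows; the key, the data
string, and the table size are arbitrary examples.  The
@code{struct hsearch_data} object is cleared before use so that
@code{hcreate_r} finds it uninitialized.

@smallexample
#define _GNU_SOURCE 1
#include <search.h>
#include <stdio.h>
#include <string.h>

int
main (void)
@{
  struct hsearch_data htab;
  ENTRY item, *found;

  /* Clear the object before hcreate_r initializes it.  */
  memset (&htab, '\0', sizeof (htab));
  if (hcreate_r (10, &htab) == 0)
    @{
      fputs ("hcreate_r failed\n", stderr);
      return 1;
    @}

  item.key = "answer";
  item.data = "forty-two";
  if (hsearch_r (item, ENTER, &found, &htab) == 0)
    @{
      fputs ("table is full\n", stderr);
      return 1;
    @}

  if (hsearch_r (item, FIND, &found, &htab) != 0)
    printf ("%s -> %s\n", found->key, (char *) found->data);

  hdestroy_r (&htab);
  return 0;
@}
@end smallexample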
@node Tree Search Function
@section The @code{tsearch} function.
Another common way to organize data for efficient searching is to use
trees.  The @code{tsearch} family of functions provides a nice interface
for organizing possibly large amounts of data, with a mean access time
proportional to the logarithm of the number of elements.
The GNU C library implementation even guarantees that this bound is
never exceeded, even for input data which cause problems for simple
binary tree implementations.
The functions described in this chapter are all described in the @w{System
V} and X/Open specifications and are therefore quite portable.
In contrast to the @code{hsearch} functions the @code{tsearch} functions
can be used with arbitrary data and not only zero-terminated strings.
The @code{tsearch} functions have the advantage that no function to
initialize data structures is necessary. A simple pointer of type
@code{void *} initialized to @code{NULL} is a valid tree and can be
extended or searched.
@comment search.h
@comment SVID
@deftypefun {void *} tsearch (const void *@var{key}, void **@var{rootp}, comparison_fn_t @var{compar})
The @code{tsearch} function searches in the tree pointed to by
@code{*@var{rootp}} for an element matching @var{key}. The function
pointed to by @var{compar} is used to determine whether two elements
match. @xref{Comparison Functions}, for a specification of the functions
which can be used for the @var{compar} parameter.
If the tree does not contain a matching entry the @var{key} value will
be added to the tree. @code{tsearch} does not make a copy of the object
pointed to by @var{key} (how could it since the size is unknown).
Instead it adds a reference to this object which means the object must
be available as long as the tree data structure is used.
The tree is represented by a pointer to a pointer since it is sometimes
necessary to change the root node of the tree. So it must not be
assumed that the variable pointed to by @var{rootp} has the same value
after the call. This also shows that it is not safe to call the
@code{tsearch} function more than once at the same time using the same
tree. It is no problem to run it more than once at a time on different
trees.
The return value is a pointer to the matching element in the tree. If a
new element was created the pointer points to the new data (which is in
fact @var{key}). If an entry had to be created and the program ran out
of space @code{NULL} is returned.
@end deftypefun
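A sketch of building a small tree of strings might look like this; the
comparison function simply wraps @code{strcmp} and the words inserted
are arbitrary examples.

@smallexample
#include <search.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Comparison function of the form expected for comparison_fn_t.  */
static int
compare_strings (const void *a, const void *b)
@{
  return strcmp ((const char *) a, (const char *) b);
@}

int
main (void)
@{
  void *root = NULL;            /* A null pointer is a valid, empty tree.  */
  static const char *words[] = @{ "pear", "apple", "plum" @};
  void *node;
  size_t i;

  for (i = 0; i < sizeof (words) / sizeof (words[0]); ++i)
    if (tsearch (words[i], &root, compare_strings) == NULL)
      @{
        fputs ("out of memory\n", stderr);
        return EXIT_FAILURE;
      @}

  /* Searching for an existing element returns a pointer to the place
     in the tree where the data pointer is stored.  */
  node = tsearch ("apple", &root, compare_strings);
  if (node != NULL)
    printf ("found: %s\n", *(const char *const *) node);

  return 0;
@}
@end smallexample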
@comment search.h
@comment SVID
@deftypefun {void *} tfind (const void *@var{key}, void *const *@var{rootp}, comparison_fn_t @var{compar})
The @code{tfind} function is similar to the @code{tsearch} function. It
locates an element matching the one pointed to by @var{key} and returns
a pointer to this element. But if no matching element is available no
new element is entered (note that the @var{rootp} parameter points to a
constant pointer). Instead the function returns @code{NULL}.
@end deftypefun
Another advantage of the @code{tsearch} functions in contrast to the
@code{hsearch} functions is that there is an easy way to remove
elements.
@comment search.h
@comment SVID
@deftypefun {void *} tdelete (const void *@var{key}, void **@var{rootp}, comparison_fn_t @var{compar})
To remove a specific element matching @var{key} from the tree
@code{tdelete} can be used. It locates the matching element using the
same method as @code{tfind}. The corresponding element is then removed
and a pointer to the parent of the deleted node is returned by the
function. If there is no matching entry in the tree nothing can be
deleted and the function returns @code{NULL}. If the root of the tree
is deleted @code{tdelete} returns some unspecified value not equal to
@code{NULL}.
@end deftypefun
@comment search.h
@comment GNU
@deftypefun void tdestroy (void *@var{vroot}, __free_fn_t @var{freefct})
If the complete search tree has to be removed one can use
@code{tdestroy}. It frees all resources allocated by the @code{tsearch}
function to generate the tree pointed to by @var{vroot}.
For the data in each tree node the function @var{freefct} is called.
The pointer to the data is passed as the argument to the function. If
no such work is necessary @var{freefct} must point to a function doing
nothing. It is called in any case.
This function is a GNU extension and not covered by the @w{System V} or
X/Open specifications.
@end deftypefun
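If every element was individually allocated with @code{malloc} (for
example via @code{strdup}), @code{free} itself is a suitable
@var{freefct}.  A minimal sketch, with arbitrary example strings:

@smallexample
#define _GNU_SOURCE 1
#include <search.h>
#include <stdlib.h>
#include <string.h>

static int
compare_strings (const void *a, const void *b)
@{
  return strcmp ((const char *) a, (const char *) b);
@}

int
main (void)
@{
  void *root = NULL;

  /* Insert copies so that every element is individually malloc'd.  */
  tsearch (strdup ("alpha"), &root, compare_strings);
  tsearch (strdup ("beta"), &root, compare_strings);

  /* Release the whole tree and every element in one call.  */
  tdestroy (root, free);
  return 0;
@}
@end smallexample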
In addition to the functions to create and destroy the tree data
structure, there is another function which allows applying a function to
all elements of the tree.  The function must have this type:
@smallexample
void __action_fn_t (const void *nodep, VISIT value, int level);
@end smallexample
The @var{nodep} argument points to the current tree node; dereferencing
it yields the data value (the pointer once given as the @var{key}
argument to @code{tsearch}).  @var{level} is a numeric value
which corresponds to the depth of the current node in the tree.  The
root node has the depth @math{0} and its children have a depth of
@math{1} and so on.  The @code{VISIT} type is an enumeration type.
@deftp {Data Type} VISIT
The @code{VISIT} value indicates the status of the current node in the
tree and how the function is called. The status of a node is either
`leaf' or `internal node'. For each leaf node the function is called
exactly once, for each internal node it is called three times: before
the first child is processed, after the first child is processed and
after both children are processed. This makes it possible to handle all
three methods of tree traversal (or even a combination of them).
@table @code
@item preorder
The current node is an internal node and the function is called before
the first child was processed.
@item endorder
The current node is an internal node and the function is called after
both children were processed.
@item postorder
The current node is an internal node and the function is called after
the first child was processed.
@item leaf
The current node is a leaf.
@end table
@end deftp
@comment search.h
@comment SVID
@deftypefun void twalk (const void *@var{root}, __action_fn_t @var{action})
For each node in the tree with a node pointed to by @var{root} the
@code{twalk} function calls the function provided by the parameter
@var{action}. For leaf nodes the function is called exactly once with
@var{value} set to @code{leaf}. For internal nodes the function is
called three times, setting the @var{value} parameter or @var{action} to
the appropriate value. The @var{level} argument for the @var{action}
function is computed while descending the tree with increasing the value
by one for the descend to a child, starting with the value @math{0} for
the root node.
Since the functions used for the @var{action} parameter to @code{twalk}
must not modify the tree data, it is safe to run @code{twalk} in more
than one thread at the same time working on the same tree.  It is also
safe to call @code{tfind} in parallel. Functions which modify the tree
must not be used. Otherwise the behaviour is undefined.
@end deftypefun
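Putting the pieces together, a traversal might be sketched like this;
the action function prints on the @code{leaf} visit and on the
@code{postorder} visit (the one between the two children of an internal
node), which yields the elements in sorted order, indented by depth.
The words inserted are arbitrary examples.

@smallexample
#include <search.h>
#include <stdio.h>
#include <string.h>

static int
compare_strings (const void *a, const void *b)
@{
  return strcmp ((const char *) a, (const char *) b);
@}

/* Action function matching __action_fn_t.  */
static void
print_node (const void *nodep, VISIT value, int level)
@{
  if (value == leaf || value == postorder)
    printf ("%*s%s\n", 2 * level, "", *(const char *const *) nodep);
@}

int
main (void)
@{
  void *root = NULL;
  static const char *words[] = @{ "delta", "alpha", "charlie", "bravo" @};
  size_t i;

  for (i = 0; i < sizeof (words) / sizeof (words[0]); ++i)
    tsearch (words[i], &root, compare_strings);

  twalk (root, print_node);
  return 0;
@}
@end smallexample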
|
253,444,729,592,566 | /*
* Copyright (c) 1983, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)syslog.c 8.4 (Berkeley) 3/18/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/uio.h>
#include <netdb.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <bits/libc-lock.h>
#include <signal.h>
#if __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif
#ifdef USE_IN_LIBIO
# include <libio/iolibio.h>
# define ftell(s) _IO_ftell (s)
#endif
static int LogType = SOCK_DGRAM; /* type of socket connection */
static int LogFile = -1; /* fd for log */
static int connected; /* have done connect */
static int LogStat; /* status bits, set by openlog() */
static const char *LogTag; /* string to tag the entry with */
static int LogFacility = LOG_USER; /* default facility code */
static int LogMask = 0xff; /* mask of priorities to be logged */
extern char *__progname; /* Program name, from crt0. */
/* Define the lock. */
__libc_lock_define_initialized (static, syslog_lock)
static void openlog_internal(const char *, int, int) internal_function;
static void closelog_internal(void);
static void sigpipe_handler (int);
#ifdef _LIBC_REENTRANT
static void cancel_handler (void *);
#endif
/*
* syslog, vsyslog --
* print message on log file; output is intended for syslogd(8).
*/
void
#if __STDC__
syslog(int pri, const char *fmt, ...)
#else
syslog(pri, fmt, va_alist)
int pri;
char *fmt;
va_dcl
#endif
{
va_list ap;
#if __STDC__
va_start(ap, fmt);
#else
va_start(ap);
#endif
vsyslog(pri, fmt, ap);
va_end(ap);
}
void
vsyslog(pri, fmt, ap)
int pri;
register const char *fmt;
va_list ap;
{
struct tm now_tm;
time_t now;
int fd;
FILE *f;
char *buf = 0;
size_t bufsize = 0;
size_t prioff, msgoff;
struct sigaction action, oldaction;
struct sigaction *oldaction_ptr = NULL;
int sigpipe;
int saved_errno = errno;
#define INTERNALLOG LOG_ERR|LOG_CONS|LOG_PERROR|LOG_PID
/* Check for invalid bits. */
if (pri & ~(LOG_PRIMASK|LOG_FACMASK)) {
syslog(INTERNALLOG,
"syslog: unknown facility/priority: %x", pri);
pri &= LOG_PRIMASK|LOG_FACMASK;
}
/* Check priority against setlogmask values. */
if ((LOG_MASK (LOG_PRI (pri)) & LogMask) == 0)
return;
/* Set default facility if none specified. */
if ((pri & LOG_FACMASK) == 0)
pri |= LogFacility;
/* Build the message in a memory-buffer stream. */
f = open_memstream (&buf, &bufsize);
prioff = fprintf (f, "<%d>", pri);
(void) time (&now);
#ifdef USE_IN_LIBIO
f->_IO_write_ptr += strftime (f->_IO_write_ptr,
f->_IO_write_end - f->_IO_write_ptr,
"%h %e %T ",
__localtime_r (&now, &now_tm));
#else
f->__bufp += strftime (f->__bufp, f->__put_limit - f->__bufp,
"%h %e %T ", __localtime_r (&now, &now_tm));
#endif
msgoff = ftell (f);
if (LogTag == NULL)
LogTag = __progname;
if (LogTag != NULL)
fputs_unlocked (LogTag, f);
if (LogStat & LOG_PID)
fprintf (f, "[%d]", __getpid ());
if (LogTag != NULL)
putc_unlocked (':', f), putc_unlocked (' ', f);
/* Restore errno for %m format. */
__set_errno (saved_errno);
/* We have the header. Print the user's format into the buffer. */
vfprintf (f, fmt, ap);
/* Close the memory stream; this will finalize the data
into a malloc'd buffer in BUF. */
fclose (f);
/* Output to stderr if requested. */
if (LogStat & LOG_PERROR) {
struct iovec iov[2];
register struct iovec *v = iov;
v->iov_base = buf + msgoff;
v->iov_len = bufsize - msgoff;
++v;
v->iov_base = (char *) "\n";
v->iov_len = 1;
(void)__writev(STDERR_FILENO, iov, 2);
}
/* Prepare for multiple users. We have to take care: open and
write are cancellation points. */
__libc_cleanup_region_start ((void (*) (void *)) cancel_handler,
&oldaction_ptr);
__libc_lock_lock (syslog_lock);
/* Prepare for a broken connection. */
memset (&action, 0, sizeof (action));
action.sa_handler = sigpipe_handler;
sigemptyset (&action.sa_mask);
sigpipe = __sigaction (SIGPIPE, &action, &oldaction);
if (sigpipe == 0)
oldaction_ptr = &oldaction;
/* Get connected, output the message to the local logger. */
if (!connected)
openlog_internal(LogTag, LogStat | LOG_NDELAY, 0);
/* If we have a SOCK_STREAM connection, also send ASCII NUL as
a record terminator. */
if (LogType == SOCK_STREAM)
++bufsize;
if (!connected || __send(LogFile, buf, bufsize, 0) < 0)
{
closelog_internal (); /* attempt re-open next time */
/*
* Output the message to the console; don't worry about blocking,
* if console blocks everything will. Make sure the error reported
* is the one from the syslogd failure.
*/
if (LogStat & LOG_CONS &&
(fd = __open(_PATH_CONSOLE, O_WRONLY|O_NOCTTY, 0)) >= 0)
{
dprintf (fd, "%s\r\n", buf + msgoff);
(void)__close(fd);
}
}
if (sigpipe == 0)
__sigaction (SIGPIPE, &oldaction, (struct sigaction *) NULL);
/* End of critical section. */
__libc_cleanup_region_end (0);
__libc_lock_unlock (syslog_lock);
free (buf);
}
static struct sockaddr SyslogAddr; /* AF_UNIX address of local logger */
static void
internal_function
openlog_internal(const char *ident, int logstat, int logfac)
{
if (ident != NULL)
LogTag = ident;
LogStat = logstat;
if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0)
LogFacility = logfac;
while (1) {
if (LogFile == -1) {
SyslogAddr.sa_family = AF_UNIX;
(void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
sizeof(SyslogAddr.sa_data));
if (LogStat & LOG_NDELAY) {
if ((LogFile = __socket(AF_UNIX, LogType, 0))
== -1)
return;
(void)__fcntl(LogFile, F_SETFD, 1);
}
}
if (LogFile != -1 && !connected)
{
int old_errno = errno;
if (__connect(LogFile, &SyslogAddr, sizeof(SyslogAddr))
== -1)
{
int saved_errno = errno;
(void)__close(LogFile);
LogFile = -1;
if (LogType == SOCK_DGRAM
&& saved_errno == EPROTOTYPE)
{
/* retry with next SOCK_STREAM: */
LogType = SOCK_STREAM;
__set_errno (old_errno);
continue;
}
} else
connected = 1;
}
break;
}
}
void
openlog (const char *ident, int logstat, int logfac)
{
/* Protect against multiple users. */
__libc_cleanup_region_start ((void (*) __P ((void *))) __libc_mutex_unlock,
&syslog_lock);
__libc_lock_lock (syslog_lock);
openlog_internal (ident, logstat, logfac);
/* Free the lock. */
__libc_cleanup_region_end (1);
}
static void
sigpipe_handler (int signo)
{
closelog_internal ();
}
static void
closelog_internal()
{
if (!connected)
return;
__close (LogFile);
LogFile = -1;
connected = 0;
}
void
closelog ()
{
/* Protect against multiple users. */
__libc_cleanup_region_start ((void (*) __P ((void *))) __libc_mutex_unlock,
&syslog_lock);
__libc_lock_lock (syslog_lock);
closelog_internal ();
LogTag = NULL;
/* Free the lock. */
__libc_cleanup_region_end (1);
}
#ifdef _LIBC_REENTRANT
static void
cancel_handler (void *ptr)
{
/* Restore the old signal handler. */
struct sigaction *oldaction = *((struct sigaction **) ptr);
if (oldaction != (struct sigaction *) NULL)
__sigaction (SIGPIPE, oldaction, (struct sigaction *) NULL);
/* Free the lock. */
__libc_lock_unlock (syslog_lock);
}
#endif
/* setlogmask -- set the log mask level */
int
setlogmask(pmask)
int pmask;
{
int omask;
omask = LogMask;
if (pmask != 0)
LogMask = pmask;
return (omask);
}
| /*
* Copyright (c) 1983, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)syslog.c 8.4 (Berkeley) 3/18/94";
#endif /* LIBC_SCCS and not lint */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/uio.h>
#include <netdb.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <bits/libc-lock.h>
#include <signal.h>
#if __STDC__
#include <stdarg.h>
#else
#include <varargs.h>
#endif
#ifdef USE_IN_LIBIO
# include <libio/iolibio.h>
# define ftell(s) _IO_ftell (s)
#endif
static int LogType = SOCK_DGRAM; /* type of socket connection */
static int LogFile = -1; /* fd for log */
static int connected; /* have done connect */
static int LogStat; /* status bits, set by openlog() */
static const char *LogTag; /* string to tag the entry with */
static int LogFacility = LOG_USER; /* default facility code */
static int LogMask = 0xff; /* mask of priorities to be logged */
extern char *__progname; /* Program name, from crt0. */
/* Define the lock. */
__libc_lock_define_initialized (static, syslog_lock)
static void openlog_internal(const char *, int, int) internal_function;
static void closelog_internal(void);
static void sigpipe_handler (int);
#ifdef _LIBC_REENTRANT
static void cancel_handler (void *);
#endif
/*
* syslog, vsyslog --
* print message on log file; output is intended for syslogd(8).
*/
void
#if __STDC__
syslog(int pri, const char *fmt, ...)
#else
syslog(pri, fmt, va_alist)
int pri;
char *fmt;
va_dcl
#endif
{
va_list ap;
#if __STDC__
va_start(ap, fmt);
#else
va_start(ap);
#endif
vsyslog(pri, fmt, ap);
va_end(ap);
}
void
vsyslog(pri, fmt, ap)
int pri;
register const char *fmt;
va_list ap;
{
struct tm now_tm;
time_t now;
int fd;
FILE *f;
char *buf = 0;
size_t bufsize = 0;
size_t prioff, msgoff;
struct sigaction action, oldaction;
struct sigaction *oldaction_ptr = NULL;
int sigpipe;
int saved_errno = errno;
#define INTERNALLOG LOG_ERR|LOG_CONS|LOG_PERROR|LOG_PID
/* Check for invalid bits. */
if (pri & ~(LOG_PRIMASK|LOG_FACMASK)) {
syslog(INTERNALLOG,
"syslog: unknown facility/priority: %x", pri);
pri &= LOG_PRIMASK|LOG_FACMASK;
}
/* Check priority against setlogmask values. */
if ((LOG_MASK (LOG_PRI (pri)) & LogMask) == 0)
return;
/* Set default facility if none specified. */
if ((pri & LOG_FACMASK) == 0)
pri |= LogFacility;
/* Build the message in a memory-buffer stream. */
f = open_memstream (&buf, &bufsize);
prioff = fprintf (f, "<%d>", pri);
(void) time (&now);
#ifdef USE_IN_LIBIO
f->_IO_write_ptr += strftime (f->_IO_write_ptr,
f->_IO_write_end - f->_IO_write_ptr,
"%h %e %T ",
__localtime_r (&now, &now_tm));
#else
f->__bufp += strftime (f->__bufp, f->__put_limit - f->__bufp,
"%h %e %T ", __localtime_r (&now, &now_tm));
#endif
msgoff = ftell (f);
if (LogTag == NULL)
LogTag = __progname;
if (LogTag != NULL)
fputs_unlocked (LogTag, f);
if (LogStat & LOG_PID)
fprintf (f, "[%d]", __getpid ());
if (LogTag != NULL)
putc_unlocked (':', f), putc_unlocked (' ', f);
/* Restore errno for %m format. */
__set_errno (saved_errno);
/* We have the header. Print the user's format into the buffer. */
vfprintf (f, fmt, ap);
/* Close the memory stream; this will finalize the data
into a malloc'd buffer in BUF. */
fclose (f);
/* Output to stderr if requested. */
if (LogStat & LOG_PERROR) {
struct iovec iov[2];
register struct iovec *v = iov;
v->iov_base = buf + msgoff;
v->iov_len = bufsize - msgoff;
/* Append a newline if necessary. */
if (buf[bufsize - 1] != '\n')
{
++v;
v->iov_base = (char *) "\n";
v->iov_len = 1;
}
(void)__writev(STDERR_FILENO, iov, v - iov + 1);
}
/* Prepare for multiple users. We have to take care: open and
write are cancellation points. */
__libc_cleanup_region_start ((void (*) (void *)) cancel_handler,
&oldaction_ptr);
__libc_lock_lock (syslog_lock);
/* Prepare for a broken connection. */
memset (&action, 0, sizeof (action));
action.sa_handler = sigpipe_handler;
sigemptyset (&action.sa_mask);
sigpipe = __sigaction (SIGPIPE, &action, &oldaction);
if (sigpipe == 0)
oldaction_ptr = &oldaction;
/* Get connected, output the message to the local logger. */
if (!connected)
openlog_internal(LogTag, LogStat | LOG_NDELAY, 0);
/* If we have a SOCK_STREAM connection, also send ASCII NUL as
a record terminator. */
if (LogType == SOCK_STREAM)
++bufsize;
if (!connected || __send(LogFile, buf, bufsize, 0) < 0)
{
closelog_internal (); /* attempt re-open next time */
/*
* Output the message to the console; don't worry about blocking,
* if console blocks everything will. Make sure the error reported
* is the one from the syslogd failure.
*/
if (LogStat & LOG_CONS &&
(fd = __open(_PATH_CONSOLE, O_WRONLY|O_NOCTTY, 0)) >= 0)
{
dprintf (fd, "%s\r\n", buf + msgoff);
(void)__close(fd);
}
}
if (sigpipe == 0)
__sigaction (SIGPIPE, &oldaction, (struct sigaction *) NULL);
/* End of critical section. */
__libc_cleanup_region_end (0);
__libc_lock_unlock (syslog_lock);
free (buf);
}
static struct sockaddr SyslogAddr; /* AF_UNIX address of local logger */
static void
internal_function
openlog_internal(const char *ident, int logstat, int logfac)
{
if (ident != NULL)
LogTag = ident;
LogStat = logstat;
if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0)
LogFacility = logfac;
while (1) {
if (LogFile == -1) {
SyslogAddr.sa_family = AF_UNIX;
(void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
sizeof(SyslogAddr.sa_data));
if (LogStat & LOG_NDELAY) {
if ((LogFile = __socket(AF_UNIX, LogType, 0))
== -1)
return;
(void)__fcntl(LogFile, F_SETFD, 1);
}
}
if (LogFile != -1 && !connected)
{
int old_errno = errno;
if (__connect(LogFile, &SyslogAddr, sizeof(SyslogAddr))
== -1)
{
int saved_errno = errno;
(void)__close(LogFile);
LogFile = -1;
if (LogType == SOCK_DGRAM
&& saved_errno == EPROTOTYPE)
{
/* retry with next SOCK_STREAM: */
LogType = SOCK_STREAM;
__set_errno (old_errno);
continue;
}
} else
connected = 1;
}
break;
}
}
void
openlog (const char *ident, int logstat, int logfac)
{
/* Protect against multiple users. */
__libc_cleanup_region_start ((void (*) __P ((void *))) __libc_mutex_unlock,
&syslog_lock);
__libc_lock_lock (syslog_lock);
openlog_internal (ident, logstat, logfac);
/* Free the lock. */
__libc_cleanup_region_end (1);
}
static void
sigpipe_handler (int signo)
{
closelog_internal ();
}
static void
closelog_internal()
{
if (!connected)
return;
__close (LogFile);
LogFile = -1;
connected = 0;
}
void
closelog ()
{
/* Protect against multiple users. */
__libc_cleanup_region_start ((void (*) __P ((void *))) __libc_mutex_unlock,
&syslog_lock);
__libc_lock_lock (syslog_lock);
closelog_internal ();
LogTag = NULL;
/* Free the lock. */
__libc_cleanup_region_end (1);
}
#ifdef _LIBC_REENTRANT
static void
cancel_handler (void *ptr)
{
/* Restore the old signal handler. */
struct sigaction *oldaction = *((struct sigaction **) ptr);
if (oldaction != (struct sigaction *) NULL)
__sigaction (SIGPIPE, oldaction, (struct sigaction *) NULL);
/* Free the lock. */
__libc_lock_unlock (syslog_lock);
}
#endif
/* setlogmask -- set the log mask level */
int
setlogmask(pmask)
int pmask;
{
int omask;
omask = LogMask;
if (pmask != 0)
LogMask = pmask;
return (omask);
}
|
197,890,948,684,264 | # Copyright (C) 1991,92,93,94,95,96,97,98,99 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
# You should have received a copy of the GNU Library General Public
# License along with the GNU C Library; see the file COPYING.LIB. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# Sub-makefile for POSIX portion of the library.
#
subdir := posix
headers := sys/utsname.h sys/times.h sys/wait.h sys/types.h unistd.h \
glob.h regex.h wordexp.h fnmatch.h bits/types.h getopt.h \
bits/posix1_lim.h bits/posix2_lim.h bits/posix_opt.h \
bits/local_lim.h tar.h bits/utsname.h bits/confname.h \
bits/waitflags.h bits/waitstatus.h sys/unistd.h sched.h \
bits/sched.h re_comp.h wait.h bits/environments.h cpio.h \
sys/sysmacros.h
distribute := confstr.h TESTS TESTS2C.sed testcases.h \
PTESTS PTESTS2C.sed ptestcases.h \
globtest.c globtest.sh wordexp-tst.sh annexc.c
routines := \
uname \
times \
wait waitpid wait3 wait4 waitid \
alarm sleep pause nanosleep \
fork vfork _exit \
execve fexecve execv execle execl execvp execlp \
getpid getppid \
getuid geteuid getgid getegid getgroups setuid setgid group_member \
getpgid setpgid getpgrp bsd-getpgrp setpgrp getsid setsid \
getlogin getlogin_r setlogin \
pathconf sysconf fpathconf \
glob glob64 fnmatch regex \
confstr \
getopt getopt1 getopt_init \
sched_setp sched_getp sched_sets sched_gets sched_yield sched_primax \
sched_primin sched_rr_gi \
getaddrinfo gai_strerror wordexp \
pread pwrite pread64 pwrite64
include ../Makeconfig
aux := init-posix environ
tests := tstgetopt testfnm runtests runptests \
tst-preadwrite test-vfork regexbug1
ifeq (yes,$(build-shared))
test-srcs := globtest
tests += wordexp-test
endif
others := getconf
install-bin := getconf
ifeq (yes,$(build-static))
install-lib := libposix.a
endif
gpl2lgpl := getopt.c getopt1.c getopt.h regex.c regex.h
before-compile := testcases.h ptestcases.h
# So they get cleaned up.
generated := $(addprefix wordexp-test-result, 1 2 3 4 5 6 7 8 9 10) \
annexc annexc.out
include ../Rules
ifeq (no,$(cross-compiling))
# globtest and wordexp-test currently only works with shared libraries
ifeq (yes,$(build-shared))
.PHONY: do-globtest do-wordexp-test
tests: do-globtest do-wordexp-test
do-globtest: $(objpfx)globtest
$(SHELL) -e globtest.sh $(common-objpfx) $(elf-objpfx) \
$(rtld-installed-name)
do-wordexp-test: $(objpfx)wordexp-test
$(SHELL) -e wordexp-tst.sh $(common-objpfx) $(elf-objpfx) \
$(rtld-installed-name)
endif
endif
CFLAGS-regex.c = -Wno-unused -Wno-strict-prototypes -DDEBUG
CFLAGS-getaddrinfo.c = -DRESOLVER
$(objpfx)libposix.a: $(dep-dummy-lib); $(make-dummy-lib)
lib: $(objpfx)libposix.a
testcases.h: TESTS TESTS2C.sed
sed -f TESTS2C.sed < $< > $@T
mv -f $@T $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
ptestcases.h: PTESTS PTESTS2C.sed
sed -f PTESTS2C.sed < $< > $@T
mv -f $@T $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
# Make the standalone glob/fnmatch package.
glob.tar: glob/ChangeLog glob/COPYING.LIB \
glob/Makefile.in glob/configure glob/configure.in glob/configure.bat\
glob/SCOPTIONS glob/SMakefile glob/Makefile.ami \
glob/fnmatch.h glob/glob.h glob/fnmatch.c glob/glob.c
tar cho$(verbose)f $@ $^
glob/%.c: %.c
rm -f $@
ln -s ../$< $@
glob/%.h: %.h
rm -f $@
ln -s ../$< $@
glob/configure: glob/configure.in
cd glob && autoconf $(ACFLAGS)
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -m'Regenerated: autoconf $(ACFLAGS) $<' $@
endif
glob/ChangeLog: ../ChangeLog
changelog-extract --regexp 'posix/(glob|fnmatch).*' < $< > $@.new
chmod a-w $@.new
mv -f $@.new $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
%.Z: %
compress -c $< > $@-tmp
mv $@-tmp $@
%.gz: %
gzip -9v -c $< > $@-tmp
mv $@-tmp $@
# Run a test on the header files we use.
# XXX Please note that for now we ignore the result of this test.
tests: $(objpfx)annexc
-$(dir $<)$(notdir $<) '$(CC)' \
'-I../include -I.. $(+sysdep-includes)' > $<.out
$(objpfx)annexc: annexc.c
$(native-compile)
| # Copyright (C) 1991,92,93,94,95,96,97,98,99 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
# You should have received a copy of the GNU Library General Public
# License along with the GNU C Library; see the file COPYING.LIB. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# Sub-makefile for POSIX portion of the library.
#
subdir := posix
headers := sys/utsname.h sys/times.h sys/wait.h sys/types.h unistd.h \
glob.h regex.h wordexp.h fnmatch.h bits/types.h getopt.h \
bits/posix1_lim.h bits/posix2_lim.h bits/posix_opt.h \
bits/local_lim.h tar.h bits/utsname.h bits/confname.h \
bits/waitflags.h bits/waitstatus.h sys/unistd.h sched.h \
bits/sched.h re_comp.h wait.h bits/environments.h cpio.h \
sys/sysmacros.h
distribute := confstr.h TESTS TESTS2C.sed testcases.h \
PTESTS PTESTS2C.sed ptestcases.h \
globtest.c globtest.sh wordexp-tst.sh annexc.c
routines := \
uname \
times \
wait waitpid wait3 wait4 waitid \
alarm sleep pause nanosleep \
fork vfork _exit \
execve fexecve execv execle execl execvp execlp \
getpid getppid \
getuid geteuid getgid getegid getgroups setuid setgid group_member \
getpgid setpgid getpgrp bsd-getpgrp setpgrp getsid setsid \
getlogin getlogin_r setlogin \
pathconf sysconf fpathconf \
glob glob64 fnmatch regex \
confstr \
getopt getopt1 getopt_init \
sched_setp sched_getp sched_sets sched_gets sched_yield sched_primax \
sched_primin sched_rr_gi \
getaddrinfo gai_strerror wordexp \
pread pwrite pread64 pwrite64
include ../Makeconfig
aux := init-posix environ
tests := tstgetopt testfnm runtests runptests \
tst-preadwrite test-vfork regexbug1 tst-getlogin
ifeq (yes,$(build-shared))
test-srcs := globtest
tests += wordexp-test
endif
others := getconf
install-bin := getconf
ifeq (yes,$(build-static))
install-lib := libposix.a
endif
gpl2lgpl := getopt.c getopt1.c getopt.h regex.c regex.h
before-compile := testcases.h ptestcases.h
# So they get cleaned up.
generated := $(addprefix wordexp-test-result, 1 2 3 4 5 6 7 8 9 10) \
annexc annexc.out
include ../Rules
ifeq (no,$(cross-compiling))
# globtest and wordexp-test currently only works with shared libraries
ifeq (yes,$(build-shared))
.PHONY: do-globtest do-wordexp-test
tests: do-globtest do-wordexp-test
do-globtest: $(objpfx)globtest
$(SHELL) -e globtest.sh $(common-objpfx) $(elf-objpfx) \
$(rtld-installed-name)
do-wordexp-test: $(objpfx)wordexp-test
$(SHELL) -e wordexp-tst.sh $(common-objpfx) $(elf-objpfx) \
$(rtld-installed-name)
endif
endif
CFLAGS-regex.c = -Wno-unused -Wno-strict-prototypes -DDEBUG
CFLAGS-getaddrinfo.c = -DRESOLVER
$(objpfx)libposix.a: $(dep-dummy-lib); $(make-dummy-lib)
lib: $(objpfx)libposix.a
testcases.h: TESTS TESTS2C.sed
sed -f TESTS2C.sed < $< > $@T
mv -f $@T $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
ptestcases.h: PTESTS PTESTS2C.sed
sed -f PTESTS2C.sed < $< > $@T
mv -f $@T $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
# Make the standalone glob/fnmatch package.
glob.tar: glob/ChangeLog glob/COPYING.LIB \
glob/Makefile.in glob/configure glob/configure.in glob/configure.bat\
glob/SCOPTIONS glob/SMakefile glob/Makefile.ami \
glob/fnmatch.h glob/glob.h glob/fnmatch.c glob/glob.c
tar cho$(verbose)f $@ $^
glob/%.c: %.c
rm -f $@
ln -s ../$< $@
glob/%.h: %.h
rm -f $@
ln -s ../$< $@
glob/configure: glob/configure.in
cd glob && autoconf $(ACFLAGS)
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -m'Regenerated: autoconf $(ACFLAGS) $<' $@
endif
glob/ChangeLog: ../ChangeLog
changelog-extract --regexp 'posix/(glob|fnmatch).*' < $< > $@.new
chmod a-w $@.new
mv -f $@.new $@
ifeq ($(with-cvs),yes)
test ! -d CVS || cvs $(CVSOPTS) commit -mRegenerated $@
endif
%.Z: %
compress -c $< > $@-tmp
mv $@-tmp $@
%.gz: %
gzip -9v -c $< > $@-tmp
mv $@-tmp $@
# Run a test on the header files we use.
# XXX Please note that for now we ignore the result of this test.
tests: $(objpfx)annexc
-$(dir $<)$(notdir $<) '$(CC)' \
'-I../include -I.. $(+sysdep-includes)' > $<.out
$(objpfx)annexc: annexc.c
$(native-compile)
|
55,018,883,367,054 | null | /* Copyright (C) 1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <unistd.h>
#include <stdio.h>
#include <string.h>
int
main (void)
{
char *login;
int errors = 0;
login = getlogin ();
if (login == NULL)
puts ("getlogin returned NULL, no further tests");
else
{
char name[1024];
int ret;
printf ("getlogin returned: `%s'\n", login);
ret = getlogin_r (name, sizeof (name));
if (ret == 0)
{
printf ("getlogin_r returned: `%s'\n", name);
if (strcmp (name, login) != 0)
{
puts ("Error: getlogin and getlogin_r returned different names");
++errors;
}
}
else
{
printf ("Error: getlogin_r returned: %d (%s)\n",
ret, strerror (ret));
++errors;
}
}
return errors != 0;
}
|
156,577,410,779,052 | /* Reentrant function to return the current login name. Unix version.
Copyright (C) 1991, 1992, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <fcntl.h>
#include <utmp.h>
/* Return at most NAME_LEN characters of the login name of the user in NAME.
If it cannot be determined or some other error occurred, return the error
code. Otherwise return 0. */
int
getlogin_r (name, name_len)
char *name;
size_t name_len;
{
char tty_pathname[2 + 2 * NAME_MAX];
char *real_tty_path = tty_pathname;
int result = 0;
struct utmp *ut, line, buffer;
{
int d = __open ("/dev/tty", 0);
if (d < 0)
return errno;
result = __ttyname_r (d, real_tty_path, sizeof (tty_pathname));
(void) __close (d);
if (result != 0)
{
__set_errno (result);
return result;
}
}
real_tty_path += 5; /* Remove "/dev/". */
__setutent ();
strncpy (line.ut_line, real_tty_path, sizeof line.ut_line);
if (__getutline_r (&line, &buffer, &ut) < 0)
{
if (errno == ESRCH)
/* The caller expects ENOENT if nothing is found. */
result = ENOENT;
else
result = errno;
}
else
{
size_t needed = strlen (ut->ut_line) + 1;
if (needed < name_len)
{
__set_errno (ERANGE);
result = ERANGE;
}
else
{
memcpy (name, ut->ut_line, needed);
result = 0;
}
}
__endutent ();
return result;
}
| /* Reentrant function to return the current login name. Unix version.
Copyright (C) 1991, 1992, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <fcntl.h>
#include <utmp.h>
/* Return at most NAME_LEN characters of the login name of the user in NAME.
If it cannot be determined or some other error occurred, return the error
code. Otherwise return 0. */
int
getlogin_r (name, name_len)
char *name;
size_t name_len;
{
char tty_pathname[2 + 2 * NAME_MAX];
char *real_tty_path = tty_pathname;
int result = 0;
struct utmp *ut, line, buffer;
/* Get name of tty connected to fd 0. Return if not a tty or
if fd 0 isn't open. Note that a lot of documentation says that
getlogin() is based on the controlling terminal---what they
really mean is "the terminal connected to standard input". The
getlogin() implementation of DEC Unix, SunOS, Solaris, HP-UX all
return NULL if fd 0 has been closed, so this is the compatible
thing to do. Note that ttyname(open("/dev/tty")) on those
systems returns /dev/tty, so that is not a possible solution for
getlogin(). */
result = __ttyname_r (0, real_tty_path, sizeof (tty_pathname));
if (result != 0)
return result;
real_tty_path += 5; /* Remove "/dev/". */
__setutent ();
strncpy (line.ut_line, real_tty_path, sizeof line.ut_line);
if (__getutline_r (&line, &buffer, &ut) < 0)
{
if (errno == ESRCH)
/* The caller expects ENOENT if nothing is found. */
result = ENOENT;
else
result = errno;
}
else
{
size_t needed = strlen (ut->ut_user) + 1;
if (needed > name_len)
{
__set_errno (ERANGE);
result = ERANGE;
}
else
{
memcpy (name, ut->ut_user, needed);
result = 0;
}
}
__endutent ();
return result;
}
|
141,531,825,318,645 | /* Bit values & structures for resource limits. Linux version.
Copyright (C) 1994, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#ifndef _SYS_RESOURCE_H
# error "Never use <bits/resource.h> directly; include <sys/resource.h> instead."
#endif
#include <asm/resource.h>
#include <bits/types.h>
/* Transmute defines to enumerations. The macro re-definitions are
necessary because some programs want to test for operating system
features with #ifdef RUSAGE_SELF. In ISO C the reflexive
definition is a no-op. */
/* Kinds of resource limit. */
enum __rlimit_resource
{
/* Per-process CPU limit, in seconds. */
_RLIMIT_CPU = RLIMIT_CPU,
#undef RLIMIT_CPU
RLIMIT_CPU = _RLIMIT_CPU,
#define RLIMIT_CPU RLIMIT_CPU
/* Largest file that can be created, in bytes. */
_RLIMIT_FSIZE = RLIMIT_FSIZE,
#undef RLIMIT_FSIZE
RLIMIT_FSIZE = _RLIMIT_FSIZE,
#define RLIMIT_FSIZE RLIMIT_FSIZE
/* Maximum size of data segment, in bytes. */
_RLIMIT_DATA = RLIMIT_DATA,
#undef RLIMIT_DATA
RLIMIT_DATA = _RLIMIT_DATA,
#define RLIMIT_DATA RLIMIT_DATA
/* Maximum size of stack segment, in bytes. */
_RLIMIT_STACK = RLIMIT_STACK,
#undef RLIMIT_STACK
RLIMIT_STACK = _RLIMIT_STACK,
#define RLIMIT_STACK RLIMIT_STACK
/* Largest core file that can be created, in bytes. */
_RLIMIT_CORE = RLIMIT_CORE,
#undef RLIMIT_CORE
RLIMIT_CORE = _RLIMIT_CORE,
#define RLIMIT_CORE RLIMIT_CORE
/* Largest resident set size, in bytes.
This affects swapping; processes that are exceeding their
resident set size will be more likely to have physical memory
taken from them. */
_RLIMIT_RSS = RLIMIT_RSS,
#undef RLIMIT_RSS
RLIMIT_RSS = _RLIMIT_RSS,
#define RLIMIT_RSS RLIMIT_RSS
/* Number of open files. */
_RLIMIT_NOFILE = RLIMIT_NOFILE,
#undef RLIMIT_NOFILE
RLIMIT_NOFILE = _RLIMIT_NOFILE,
RLIMIT_OFILE = RLIMIT_NOFILE, /* BSD name for same. */
#define RLIMIT_NOFILE RLIMIT_NOFILE
#define RLIMIT_OFILE RLIMIT_OFILE
/* Address space limit (?) */
_RLIMIT_AS = RLIMIT_AS,
#undef RLIMIT_AS
RLIMIT_AS = _RLIMIT_AS,
#define RLIMIT_AS RLIMIT_AS
/* Number of processes. */
_RLIMIT_NPROC = RLIMIT_NPROC,
#undef RLIMIT_NPROC
RLIMIT_NPROC = _RLIMIT_NPROC,
#define RLIMIT_NPROC RLIMIT_NPROC
/* Locked-in-memory address space. */
_RLIMIT_MEMLOCK = RLIMIT_MEMLOCK,
#undef RLIMIT_MEMLOCK
RLIMIT_MEMLOCK = _RLIMIT_MEMLOCK,
#define RLIMIT_MEMLOCK RLIMIT_MEMLOCK
RLIMIT_NLIMITS = RLIM_NLIMITS,
#undef RLIM_NLIMITS
RLIM_NLIMITS = RLIMIT_NLIMITS
#define RLIMIT_NLIMITS RLIMIT_NLIMITS
#define RLIM_NLIMITS RLIM_NLIMITS
};
/* Value to indicate that there is no limit. */
#ifndef __USE_FILE_OFFSET64
# define RLIM_INFINITY ((unsigned long int)(~0UL))
#else
# define RLIM_INFINITY 0xffffffffffffffffuLL
#endif
#ifdef __USE_LARGEFILE64
# define RLIM64_INFINITY 0xffffffffffffffffuLL
#endif
/* We can represent all limits. */
#define RLIM_SAVED_MAX RLIM_INFINITY
#define RLIM_SAVED_CUR RLIM_INFINITY
/* Type for resource quantity measurement. */
#ifndef __USE_FILE_OFFSET64
typedef __rlim_t rlim_t;
#else
typedef __rlim64_t rlim_t;
#endif
#ifdef __USE_LARGEFILE64
typedef __rlim64_t rlim64_t;
#endif
struct rlimit
{
/* The current (soft) limit. */
rlim_t rlim_cur;
/* The hard limit. */
rlim_t rlim_max;
};
#ifdef __USE_LARGEFILE64
struct rlimit64
{
/* The current (soft) limit. */
rlim64_t rlim_cur;
/* The hard limit. */
rlim64_t rlim_max;
};
#endif
/* Whose usage statistics do you want? */
enum __rusage_who
{
/* The calling process. */
RUSAGE_SELF = 0,
#define RUSAGE_SELF RUSAGE_SELF
/* All of its terminated child processes. */
RUSAGE_CHILDREN = -1,
#define RUSAGE_CHILDREN RUSAGE_CHILDREN
/* Both. */
RUSAGE_BOTH = -2
#define RUSAGE_BOTH RUSAGE_BOTH
};
#define __need_timeval
#include <bits/time.h> /* For `struct timeval'. */
/* Structure which says how much of each resource has been used. */
struct rusage
{
/* Total amount of user time used. */
struct timeval ru_utime;
/* Total amount of system time used. */
struct timeval ru_stime;
/* Maximum resident set size (in kilobytes). */
long int ru_maxrss;
/* Amount of sharing of text segment memory
with other processes (kilobyte-seconds). */
long int ru_ixrss;
/* Amount of data segment memory used (kilobyte-seconds). */
long int ru_idrss;
/* Amount of stack memory used (kilobyte-seconds). */
long int ru_isrss;
/* Number of soft page faults (i.e. those serviced by reclaiming
a page from the list of pages awaiting reallocation. */
long int ru_minflt;
/* Number of hard page faults (i.e. those that required I/O). */
long int ru_majflt;
/* Number of times a process was swapped out of physical memory. */
long int ru_nswap;
/* Number of input operations via the file system. Note: This
and `ru_oublock' do not include operations with the cache. */
long int ru_inblock;
/* Number of output operations via the file system. */
long int ru_oublock;
/* Number of IPC messages sent. */
long int ru_msgsnd;
/* Number of IPC messages received. */
long int ru_msgrcv;
/* Number of signals delivered. */
long int ru_nsignals;
/* Number of voluntary context switches, i.e. because the process
gave up the process before it had to (usually to wait for some
resource to be available). */
long int ru_nvcsw;
/* Number of involuntary context switches, i.e. a higher priority process
became runnable or the current process used up its time slice. */
long int ru_nivcsw;
};
/* Priority limits. */
#define PRIO_MIN -20 /* Minimum priority a process can have. */
#define PRIO_MAX 20 /* Maximum priority a process can have. */
/* The type of the WHICH argument to `getpriority' and `setpriority',
indicating what flavor of entity the WHO argument specifies. */
enum __priority_which
{
PRIO_PROCESS = 0, /* WHO is a process ID. */
PRIO_PGRP = 1, /* WHO is a process group ID. */
PRIO_USER = 2 /* WHO is a user ID. */
};
| /* Bit values & structures for resource limits. Linux version.
Copyright (C) 1994, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#ifndef _SYS_RESOURCE_H
# error "Never use <bits/resource.h> directly; include <sys/resource.h> instead."
#endif
#include <asm/resource.h>
#include <bits/types.h>
/* Transmute defines to enumerations. The macro re-definitions are
necessary because some programs want to test for operating system
features with #ifdef RUSAGE_SELF. In ISO C the reflexive
definition is a no-op. */
/* Kinds of resource limit. */
enum __rlimit_resource
{
/* Per-process CPU limit, in seconds. */
_RLIMIT_CPU = RLIMIT_CPU,
#undef RLIMIT_CPU
RLIMIT_CPU = _RLIMIT_CPU,
#define RLIMIT_CPU RLIMIT_CPU
/* Largest file that can be created, in bytes. */
_RLIMIT_FSIZE = RLIMIT_FSIZE,
#undef RLIMIT_FSIZE
RLIMIT_FSIZE = _RLIMIT_FSIZE,
#define RLIMIT_FSIZE RLIMIT_FSIZE
/* Maximum size of data segment, in bytes. */
_RLIMIT_DATA = RLIMIT_DATA,
#undef RLIMIT_DATA
RLIMIT_DATA = _RLIMIT_DATA,
#define RLIMIT_DATA RLIMIT_DATA
/* Maximum size of stack segment, in bytes. */
_RLIMIT_STACK = RLIMIT_STACK,
#undef RLIMIT_STACK
RLIMIT_STACK = _RLIMIT_STACK,
#define RLIMIT_STACK RLIMIT_STACK
/* Largest core file that can be created, in bytes. */
_RLIMIT_CORE = RLIMIT_CORE,
#undef RLIMIT_CORE
RLIMIT_CORE = _RLIMIT_CORE,
#define RLIMIT_CORE RLIMIT_CORE
/* Largest resident set size, in bytes.
This affects swapping; processes that are exceeding their
resident set size will be more likely to have physical memory
taken from them. */
_RLIMIT_RSS = RLIMIT_RSS,
#undef RLIMIT_RSS
RLIMIT_RSS = _RLIMIT_RSS,
#define RLIMIT_RSS RLIMIT_RSS
/* Number of open files. */
_RLIMIT_NOFILE = RLIMIT_NOFILE,
#undef RLIMIT_NOFILE
RLIMIT_NOFILE = _RLIMIT_NOFILE,
RLIMIT_OFILE = RLIMIT_NOFILE, /* BSD name for same. */
#define RLIMIT_NOFILE RLIMIT_NOFILE
#define RLIMIT_OFILE RLIMIT_OFILE
/* Address space limit (?) */
_RLIMIT_AS = RLIMIT_AS,
#undef RLIMIT_AS
RLIMIT_AS = _RLIMIT_AS,
#define RLIMIT_AS RLIMIT_AS
/* Number of processes. */
_RLIMIT_NPROC = RLIMIT_NPROC,
#undef RLIMIT_NPROC
RLIMIT_NPROC = _RLIMIT_NPROC,
#define RLIMIT_NPROC RLIMIT_NPROC
/* Locked-in-memory address space. */
_RLIMIT_MEMLOCK = RLIMIT_MEMLOCK,
#undef RLIMIT_MEMLOCK
RLIMIT_MEMLOCK = _RLIMIT_MEMLOCK,
#define RLIMIT_MEMLOCK RLIMIT_MEMLOCK
RLIMIT_NLIMITS = RLIM_NLIMITS,
#undef RLIM_NLIMITS
RLIM_NLIMITS = RLIMIT_NLIMITS
#define RLIMIT_NLIMITS RLIMIT_NLIMITS
#define RLIM_NLIMITS RLIM_NLIMITS
};
/* Value to indicate that there is no limit. */
#ifndef __USE_FILE_OFFSET64
# define RLIM_INFINITY ((unsigned long int)(~0UL))
#else
# define RLIM_INFINITY 0xffffffffffffffffuLL
#endif
#ifdef __USE_LARGEFILE64
# define RLIM64_INFINITY 0xffffffffffffffffuLL
#endif
/* We can represent all limits. */
#define RLIM_SAVED_MAX RLIM_INFINITY
#define RLIM_SAVED_CUR RLIM_INFINITY
/* Type for resource quantity measurement. */
#ifndef __USE_FILE_OFFSET64
typedef __rlim_t rlim_t;
#else
typedef __rlim64_t rlim_t;
#endif
#ifdef __USE_LARGEFILE64
typedef __rlim64_t rlim64_t;
#endif
struct rlimit
{
/* The current (soft) limit. */
rlim_t rlim_cur;
/* The hard limit. */
rlim_t rlim_max;
};
#ifdef __USE_LARGEFILE64
struct rlimit64
{
/* The current (soft) limit. */
rlim64_t rlim_cur;
/* The hard limit. */
rlim64_t rlim_max;
};
#endif
/* Whose usage statistics do you want? */
enum __rusage_who
{
/* The calling process. */
RUSAGE_SELF = 0,
#define RUSAGE_SELF RUSAGE_SELF
/* All of its terminated child processes. */
RUSAGE_CHILDREN = -1,
#define RUSAGE_CHILDREN RUSAGE_CHILDREN
/* Both. */
RUSAGE_BOTH = -2
#define RUSAGE_BOTH RUSAGE_BOTH
};
#define __need_timeval
#include <bits/time.h> /* For `struct timeval'. */
/* Structure which says how much of each resource has been used. */
struct rusage
{
/* Total amount of user time used. */
struct timeval ru_utime;
/* Total amount of system time used. */
struct timeval ru_stime;
/* Maximum resident set size (in kilobytes). */
long int ru_maxrss;
/* Amount of sharing of text segment memory
with other processes (kilobyte-seconds). */
long int ru_ixrss;
/* Amount of data segment memory used (kilobyte-seconds). */
long int ru_idrss;
/* Amount of stack memory used (kilobyte-seconds). */
long int ru_isrss;
/* Number of soft page faults (i.e. those serviced by reclaiming
a page from the list of pages awaiting reallocation. */
long int ru_minflt;
/* Number of hard page faults (i.e. those that required I/O). */
long int ru_majflt;
/* Number of times a process was swapped out of physical memory. */
long int ru_nswap;
/* Number of input operations via the file system. Note: This
and `ru_oublock' do not include operations with the cache. */
long int ru_inblock;
/* Number of output operations via the file system. */
long int ru_oublock;
/* Number of IPC messages sent. */
long int ru_msgsnd;
/* Number of IPC messages received. */
long int ru_msgrcv;
/* Number of signals delivered. */
long int ru_nsignals;
/* Number of voluntary context switches, i.e. because the process
gave up the process before it had to (usually to wait for some
resource to be available). */
long int ru_nvcsw;
/* Number of involuntary context switches, i.e. a higher priority process
became runnable or the current process used up its time slice. */
long int ru_nivcsw;
};
/* Priority limits. */
#define PRIO_MIN -20 /* Minimum priority a process can have. */
#define PRIO_MAX 20 /* Maximum priority a process can have. */
/* The type of the WHICH argument to `getpriority' and `setpriority',
indicating what flavor of entity the WHO argument specifies. */
enum __priority_which
{
PRIO_PROCESS = 0, /* WHO is a process ID. */
#define PRIO_PROCESS PRIO_PROCESS
PRIO_PGRP = 1, /* WHO is a process group ID. */
#define PRIO_PGRP PRIO_PGRP
PRIO_USER = 2 /* WHO is a user ID. */
#define PRIO_USER PRIO_USER
};
|
233,806,228,448,712 | # @(#)africa 7.31
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (4th edition),
# San Diego: ACS Publications, Inc. (1995).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# Previous editions of this database used WAT, CAT, SAT, and EAT
# for +0:00 through +3:00, respectively,
# but Mark R V Murray <markm@iafrica.com> reports that
# `SAST' is the official abbreviation for +2:00 in the country of South Africa,
# `CAT' is commonly used for +2:00 in countries north of South Africa, and
# `WAT' is probably the best name for +1:00, as the common phrase for
# the area that includes Nigeria is ``West Africa''.
# He has heard of ``Western Sahara Time'' for +0:00 but can find no reference.
#
# To make things confusing, `WAT' seems to have been used for -1:00 long ago;
# I'd guess that this was because people needed _some_ name for -1:00,
# and at the time, far west Africa was the only major land area in -1:00.
# This usage is now obsolete, as the last use of -1:00 on the African
# mainland seems to have been 1976 in Western Sahara.
#
# To summarize, the following abbreviations seem to have some currency:
# -1:00 WAT West Africa Time (no longer used)
# 0:00 GMT Greenwich Mean Time
# 2:00 CAT Central Africa Time
# 2:00 SAST South Africa Standard Time
# and Murray suggests the following abbreviation:
# 1:00 WAT West Africa Time
# I realize that this leads to `WAT' being used for both -1:00 and 1:00
# for times before 1976, but this is the best I can think of
# until we get more information.
#
# I invented the following abbreviations; corrections are welcome!
# 2:00 WAST West Africa Summer Time
# 2:30 BEAT British East Africa Time (no longer used)
# 2:45 BEAUT British East Africa Unified Time (no longer used)
# 3:00 CAST Central Africa Summer Time (no longer used)
# 3:00 SAST South Africa Summer Time (no longer used)
# 3:00 EAT East Africa Time
# 4:00 EAST East Africa Summer Time (no longer used)
# Algeria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Algeria 1916 only - Jun 14 23:00s 1:00 S
Rule Algeria 1916 1919 - Oct Sun<=7 23:00s 0 -
Rule Algeria 1917 only - Mar 24 23:00s 1:00 S
Rule Algeria 1918 only - Mar 9 23:00s 1:00 S
Rule Algeria 1919 only - Mar 1 23:00s 1:00 S
Rule Algeria 1920 only - Feb 14 23:00s 1:00 S
Rule Algeria 1920 only - Oct 23 23:00s 0 -
Rule Algeria 1921 only - Mar 14 23:00s 1:00 S
Rule Algeria 1921 only - Jun 21 23:00s 0 -
Rule Algeria 1939 only - Sep 11 23:00s 1:00 S
Rule Algeria 1939 only - Nov 19 1:00 0 -
Rule Algeria 1944 1945 - Apr Mon<=7 2:00 1:00 S
Rule Algeria 1944 only - Oct 8 2:00 0 -
Rule Algeria 1945 only - Sep 16 1:00 0 -
Rule Algeria 1971 only - Apr 25 23:00s 1:00 S
Rule Algeria 1971 only - Sep 26 23:00s 0 -
Rule Algeria 1977 only - May 6 0:00 1:00 S
Rule Algeria 1977 only - Oct 21 0:00 0 -
Rule Algeria 1978 only - Mar 24 1:00 1:00 S
Rule Algeria 1978 only - Sep 22 3:00 0 -
Rule Algeria 1980 only - Apr 25 0:00 1:00 S
Rule Algeria 1980 only - Oct 31 2:00 0 -
# Shanks gives 0:09 for Paris Mean Time; go with Howse's more precise 0:09:21.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:01
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
0:00 Algeria WE%sT 1940 Feb 25 2:00
1:00 Algeria CE%sT 1946 Oct 7
0:00 - WET 1956 Jan 29
1:00 - CET 1963 Apr 14
0:00 Algeria WE%sT 1977 Oct 21
1:00 Algeria CE%sT 1979 Oct 26
0:00 Algeria WE%sT 1981 May
1:00 - CET
# Angola
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Luanda 0:52:56 - LMT 1892
0:52:04 - LMT 1911 May 26 # Luanda Mean Time?
1:00 - WAT
# Benin
# Whitman says they switched to 1:00 in 1946, not 1934; go with Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Porto-Novo 0:10:28 - LMT 1912
0:00 - GMT 1934 Feb 26
1:00 - WAT
# Botswana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Gaborone 1:43:40 - LMT 1885
2:00 - CAT 1943 Sep 19 2:00
2:00 1:00 CAST 1944 Mar 19 2:00
2:00 - CAT
# Burkina Faso
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ouagadougou -0:06:04 - LMT 1912
0:00 - GMT
# Burundi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bujumbura 1:57:28 - LMT 1890
2:00 - CAT
# Cameroon
# Whitman says they switched to 1:00 in 1920; go with Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Douala 0:38:48 - LMT 1912
1:00 - WAT
# Cape Verde
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Cape_Verde -1:34:04 - LMT 1907 # Praia
-2:00 - CVT 1942 Sep
-2:00 1:00 CVST 1945 Oct 15
-2:00 - CVT 1975 Nov 25 2:00
-1:00 - CVT
# Central African Republic
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bangui 1:14:20 - LMT 1912
1:00 - WAT
# Chad
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ndjamena 1:00:12 - LMT 1912
1:00 - WAT 1979 Oct 14
1:00 1:00 WAST 1980 Mar 8
1:00 - WAT
# Comoros
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Comoro 2:53:04 - LMT 1911 Jul # Moroni, Gran Comoro
3:00 - EAT
# Democratic Republic of Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kinshasa 1:01:12 - LMT 1897 Nov 9
1:00 - WAT
Zone Africa/Lubumbashi 1:49:52 - LMT 1897 Nov 9
2:00 - CAT
# Republic of the Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Brazzaville 1:01:08 - LMT 1912
1:00 - WAT
# Cote D'Ivoire
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Abidjan -0:16:08 - LMT 1912
0:00 - GMT
# Djibouti
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Djibouti 2:52:36 - LMT 1911 Jul
3:00 - EAT
###############################################################################
# Egypt
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Egypt 1940 only - Jul 15 0:00 1:00 S
Rule Egypt 1940 only - Oct 1 0:00 0 -
Rule Egypt 1941 only - Apr 15 0:00 1:00 S
Rule Egypt 1941 only - Sep 16 0:00 0 -
Rule Egypt 1942 1944 - Apr 1 0:00 1:00 S
Rule Egypt 1942 only - Oct 27 0:00 0 -
Rule Egypt 1943 1945 - Nov 1 0:00 0 -
Rule Egypt 1945 only - Apr 16 0:00 1:00 S
Rule Egypt 1957 only - May 10 0:00 1:00 S
Rule Egypt 1957 1958 - Oct 1 0:00 0 -
Rule Egypt 1958 only - May 1 0:00 1:00 S
Rule Egypt 1959 1981 - May 1 1:00 1:00 S
Rule Egypt 1959 1965 - Sep 30 3:00 0 -
Rule Egypt 1966 1994 - Oct 1 3:00 0 -
Rule Egypt 1982 only - Jul 25 1:00 1:00 S
Rule Egypt 1983 only - Jul 12 1:00 1:00 S
Rule Egypt 1984 1988 - May 1 1:00 1:00 S
Rule Egypt 1989 only - May 6 1:00 1:00 S
Rule Egypt 1990 1994 - May 1 1:00 1:00 S
# IATA (after 1990) says transitions are at 0:00.
# Go with IATA starting in 1995, except correct 1995 entry from 09-30 to 09-29.
Rule Egypt 1995 max - Apr lastFri 0:00s 1:00 S
Rule Egypt 1995 max - Sep lastThu 23:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Cairo 2:05:00 - LMT 1900 Oct
2:00 Egypt EE%sT
# Equatorial Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Malabo 0:35:08 - LMT 1912
0:00 - GMT 1963 Dec 15
1:00 - WAT
# Eritrea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Asmera 2:35:32 - LMT 1870
2:35:32 - AMT 1890 # Asmera Mean Time
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Ethiopia
# From Paul Eggert (1997-10-05):
# Shanks writes that Ethiopia had six narrowly-spaced time zones between
# 1870 and 1890, and that they merged to 38E50 (2:35:20) in 1890.
# We'll guess that 38E50 is for Adis Dera.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Addis_Ababa 2:34:48 - LMT 1870
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Gabon
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Libreville 0:37:48 - LMT 1912
1:00 - WAT
# Gambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Banjul -1:06:36 - LMT 1912
-1:06:36 - BMT 1935 # Banjul Mean Time
-1:00 - WAT 1964
0:00 - GMT
# Ghana
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman says DST was observed from 1931 to ``the present''; go with Shanks.
Rule Ghana 1936 1942 - Sep 1 0:00 0:20 GHST
Rule Ghana 1936 1942 - Dec 31 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Accra -0:00:52 - LMT 1918
0:00 Ghana %s
# Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Conakry -0:54:52 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960
0:00 - GMT
# Guinea-Bissau
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bissau -1:02:20 - LMT 1911 May 26
-1:00 - WAT 1975
0:00 - GMT
# Kenya
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nairobi 2:27:16 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1940
2:45 - BEAUT 1960
3:00 - EAT
# Lesotho
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maseru 1:50:00 - LMT 1903 Mar
2:00 - SAST 1943 Sep 19 2:00
2:00 1:00 SAST 1944 Mar 19 2:00
2:00 - SAST
# Liberia
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# In 1972 Liberia was the last country to switch
# from a UTC offset that was not a multiple of 15 minutes.
# Howse reports that it was in honor of their president's birthday.
# Shanks reports the date as May 1, whereas Howse reports Jan; go with Shanks.
# For Liberia before 1972, Shanks reports -0:44, whereas Howse and Whitman
# each report -0:44:30; go with the more precise figure.
#
# From Shanks, as corrected by Whitman:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Monrovia -0:43:08 - LMT 1882
-0:43:08 - MMT 1919 Mar # Monrovia Mean Time
-0:44:30 - LRT 1972 May # Liberia Time
0:00 - GMT
###############################################################################
# Libya
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Libya 1951 only - Oct 14 2:00 1:00 S
Rule Libya 1952 only - Jan 1 0:00 0 -
Rule Libya 1953 only - Oct 9 2:00 1:00 S
Rule Libya 1954 only - Jan 1 0:00 0 -
Rule Libya 1955 only - Sep 30 0:00 1:00 S
Rule Libya 1956 only - Jan 1 0:00 0 -
Rule Libya 1982 1984 - Apr 1 0:00 1:00 S
Rule Libya 1982 1985 - Oct 1 0:00 0 -
Rule Libya 1985 only - Apr 6 0:00 1:00 S
Rule Libya 1986 only - Apr 4 0:00 1:00 S
Rule Libya 1986 only - Oct 3 0:00 0 -
Rule Libya 1987 1989 - Apr 1 0:00 1:00 S
Rule Libya 1987 1990 - Oct 1 0:00 0 -
Rule Libya 1990 only - May 4 0:00 1:00 S
Rule Libya 1996 only - Mar 30 2:00s 1:00 S
Rule Libya 1996 only - Sep 30 2:00s 0 -
Rule Libya 1997 only - Apr 4 0:00 1:00 S
Rule Libya 1997 only - Oct 4 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tripoli 0:52:44 - LMT 1920
1:00 Libya CE%sT 1959
2:00 - EET 1982
1:00 Libya CE%sT 1991
2:00 - EET 1996 Mar 30 3:00
1:00 Libya CE%sT 1997 Oct 4 0:00
2:00 - EET
# Madagascar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Antananarivo 3:10:04 - LMT 1911 Jul
3:00 - EAT 1954 Feb 27 23:00s
3:00 1:00 EAST 1954 May 29 23:00s
3:00 - EAT
# Malawi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Blantyre 2:20:00 - LMT 1903 Mar
2:00 - CAT
# Mali
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bamako -0:32:00 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Jun 20
0:00 - GMT
# no longer different from Bamako, but too famous to omit
Zone Africa/Timbuktu -0:12:04 - LMT 1912
0:00 - GMT
# Mauritania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nouakchott -1:03:48 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Nov 28
0:00 - GMT
# Mauritius
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis
4:00 - MUT # Mauritius Time
# Agalega Is, Rodriguez
# no information; probably like Indian/Mauritius
# Mayotte
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mayotte 3:00:56 - LMT 1911 Jul # Mamoutzou
3:00 - EAT
# Morocco
# See the `europe' file for Spanish Morocco (Africa/Ceuta).
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Morocco 1939 only - Sep 12 0:00 1:00 S
Rule Morocco 1939 only - Nov 19 0:00 0 -
Rule Morocco 1940 only - Feb 25 0:00 1:00 S
Rule Morocco 1945 only - Nov 18 0:00 0 -
Rule Morocco 1950 only - Jun 11 0:00 1:00 S
Rule Morocco 1950 only - Oct 29 0:00 0 -
Rule Morocco 1967 only - Jun 3 12:00 1:00 S
Rule Morocco 1967 only - Oct 1 0:00 0 -
Rule Morocco 1974 only - Jun 24 0:00 1:00 S
Rule Morocco 1974 only - Sep 1 0:00 0 -
Rule Morocco 1976 1977 - May 1 0:00 1:00 S
Rule Morocco 1976 only - Aug 1 0:00 0 -
Rule Morocco 1977 only - Sep 28 0:00 0 -
Rule Morocco 1978 only - Jun 1 0:00 1:00 S
Rule Morocco 1978 only - Aug 4 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26
0:00 Morocco WE%sT 1984 Mar 16
1:00 - CET 1986
0:00 - WET
# Western Sahara
Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan
-1:00 - WAT 1976 Apr 14
0:00 - WET
# Mozambique
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maputo 2:10:20 - LMT 1903 Mar
2:00 - CAT
# Namibia
# Shanks says DST transitions are at 0:00; go with IATA.
# The 1994-04-03 transition is from Shanks.
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Namibia 1994 max - Sep Sun>=1 2:00 1:00 S
Rule Namibia 1995 max - Apr Sun>=1 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Windhoek 1:08:24 - LMT 1892 Feb 8
1:30 - SWAT 1903 Mar # SW Africa Time
2:00 - SAST 1942 Sep 20 2:00
2:00 1:00 SAST 1943 Mar 21 2:00
2:00 - SAST 1990 Mar 21 # independence
2:00 - CAT 1994 Apr 3
1:00 Namibia WA%sT
# Niger
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Niamey 0:08:28 - LMT 1912
-1:00 - WAT 1934 Feb 26
0:00 - GMT 1960
1:00 - WAT
# Nigeria
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lagos 0:13:36 - LMT 1919 Sep
1:00 - WAT
# Reunion
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis
4:00 - RET # Reunion Time
#
# Scattered Islands (Iles Eparses) administered from Reunion are as follows.
# The following information about them is taken from
# Iles Eparses (www.outre-mer.gouv.fr/domtom/ile.htm, 1997-07-22, in French;
# no longer available as of 1999-08-17).
# We have no info about their time zone histories.
#
# Bassas da India - uninhabited
# Europa Island - inhabited from 1905 to 1910 by two families
# Glorioso Is - inhabited until at least 1958
# Juan de Nova - uninhabited
# Tromelin - inhabited until at least 1958
# Rwanda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kigali 2:00:16 - LMT 1935 Jun
2:00 - CAT
# St Helena
# From Paul Eggert (1997-10-05):
# Shanks says St Helena was 1W26 (-0:05:44) from 1890 to 1951,
# but this is most likely a typo for 5W42, the longitude of Jamestown.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/St_Helena -0:22:48 - LMT 1890 # Jamestown
-0:22:48 - JMT 1951 # Jamestown Mean Time
0:00 - GMT
# The other parts of the St Helena territory are similar:
# Tristan da Cunha: on GMT, say Whitman and the CIA
# Ascension: on GMT, says usno1995 and the CIA
# Gough (scientific station since 1955; sealers wintered previously):
# on GMT, says the CIA
# Inaccessible, Nightingale: no information, but probably GMT
# Sao Tome and Principe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Sao_Tome 0:26:56 - LMT 1884
-0:36:32 - LMT 1912 # Lisbon Mean Time
0:00 - GMT
# Senegal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dakar -1:09:44 - LMT 1912
-1:00 - WAT 1941 Jun
0:00 - GMT
# Seychelles
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mahe 3:41:48 - LMT 1906 Jun # Victoria
4:00 - SCT # Seychelles Time
# Sierra Leone
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives Mar 31 - Aug 31 for 1931 on; go with Shanks.
Rule SL 1935 1942 - Jun 1 0:00 0:40 SLST
Rule SL 1935 1942 - Oct 1 0:00 0 WAT
Rule SL 1957 1962 - Jun 1 0:00 1:00 SLST
Rule SL 1957 1962 - Sep 1 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Freetown -0:53:00 - LMT 1882
-0:53:00 - FMT 1913 Jun # Freetown Mean Time
-1:00 SL %s 1957
0:00 SL %s
# Somalia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mogadishu 3:01:28 - LMT 1893 Nov
3:00 - EAT 1931
2:30 - BEAT 1957
3:00 - EAT
# South Africa
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule SA 1942 1943 - Sep Sun>=15 2:00 1:00 -
Rule SA 1943 1944 - Mar Sun>=15 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8
1:30 - SAST 1903 Mar
2:00 SA SAST
# Shanks erroneously claims that most of South Africa switched to 1:00
# on 1994-04-03 at 00:00.
#
# Marion and Prince Edward Is
# scientific station since 1947
# no information
# Sudan
# From Michael Ross <mross@antigone.com> (1995-11-15):
# Sudan no longer observes any form of daylight time change.
# I verified this today by telephone with the Sudan Mission to the
# United Nations: 212-573-6033
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Sudan 1970 only - May 1 0:00 1:00 S
Rule Sudan 1970 1985 - Oct 15 0:00 0 -
Rule Sudan 1971 only - Apr 30 0:00 1:00 S
Rule Sudan 1972 1985 - Apr lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Khartoum 2:10:08 - LMT 1931
2:00 Sudan CA%sT
# Swaziland
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mbabane 2:04:24 - LMT 1903 Mar
2:00 - SAST
# Tanzania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dar_es_Salaam 2:37:08 - LMT 1931
3:00 - EAT 1948
2:45 - BEAUT 1961
3:00 - EAT
# Togo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lome 0:04:52 - LMT 1893
0:00 - GMT
# Tunisia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tunisia 1939 only - Apr 15 23:00s 1:00 S
Rule Tunisia 1939 only - Nov 18 23:00s 0 -
Rule Tunisia 1940 only - Feb 25 23:00s 1:00 S
Rule Tunisia 1941 only - Oct 6 0:00 0 -
Rule Tunisia 1942 only - Mar 9 0:00 1:00 S
Rule Tunisia 1942 only - Nov 2 3:00 0 -
Rule Tunisia 1943 only - Mar 29 2:00 1:00 S
Rule Tunisia 1943 only - Apr 17 2:00 0 -
Rule Tunisia 1943 only - Apr 25 2:00 1:00 S
Rule Tunisia 1943 only - Oct 4 2:00 0 -
Rule Tunisia 1944 1945 - Apr Mon>=1 2:00 1:00 S
Rule Tunisia 1944 only - Oct 8 0:00 0 -
Rule Tunisia 1945 only - Sep 16 0:00 0 -
Rule Tunisia 1977 only - Apr 30 0:00s 1:00 S
Rule Tunisia 1977 only - Sep 24 0:00s 0 -
Rule Tunisia 1978 only - May 1 0:00s 1:00 S
Rule Tunisia 1978 only - Oct 1 0:00s 0 -
Rule Tunisia 1988 only - Jun 1 0:00s 1:00 S
Rule Tunisia 1988 1990 - Sep lastSun 0:00s 0 -
Rule Tunisia 1989 only - Mar 26 0:00s 1:00 S
Rule Tunisia 1990 only - May 1 0:00s 1:00 S
# Shanks gives 0:09 for Paris Mean Time; go with Howse's more precise 0:09:21.
# Shanks says the 1911 switch occurred on Mar 9; go with Howse's Mar 11.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tunis 0:40:44 - LMT 1881 May 12
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
1:00 Tunisia CE%sT
# Uganda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kampala 2:09:40 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1948
2:45 - BEAUT 1957
3:00 - EAT
# Zambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lusaka 1:53:08 - LMT 1903 Mar
2:00 - CAT
# Zimbabwe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Harare 2:04:12 - LMT 1903 Mar
2:00 - CAT
| # @(#)africa 7.32
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (5th edition),
# San Diego: ACS Publications, Inc. (1999).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# Previous editions of this database used WAT, CAT, SAT, and EAT
# for +0:00 through +3:00, respectively,
# but Mark R V Murray <markm@iafrica.com> reports that
# `SAST' is the official abbreviation for +2:00 in the country of South Africa,
# `CAT' is commonly used for +2:00 in countries north of South Africa, and
# `WAT' is probably the best name for +1:00, as the common phrase for
# the area that includes Nigeria is ``West Africa''.
# He has heard of ``Western Sahara Time'' for +0:00 but can find no reference.
#
# To make things confusing, `WAT' seems to have been used for -1:00 long ago;
# I'd guess that this was because people needed _some_ name for -1:00,
# and at the time, far west Africa was the only major land area in -1:00.
# This usage is now obsolete, as the last use of -1:00 on the African
# mainland seems to have been 1976 in Western Sahara.
#
# To summarize, the following abbreviations seem to have some currency:
# -1:00 WAT West Africa Time (no longer used)
# 0:00 GMT Greenwich Mean Time
# 2:00 CAT Central Africa Time
# 2:00 SAST South Africa Standard Time
# and Murray suggests the following abbreviation:
# 1:00 WAT West Africa Time
# I realize that this leads to `WAT' being used for both -1:00 and 1:00
# for times before 1976, but this is the best I can think of
# until we get more information.
#
# I invented the following abbreviations; corrections are welcome!
# 2:00 WAST West Africa Summer Time
# 2:30 BEAT British East Africa Time (no longer used)
# 2:45 BEAUT British East Africa Unified Time (no longer used)
# 3:00 CAST Central Africa Summer Time (no longer used)
# 3:00 SAST South Africa Summer Time (no longer used)
# 3:00 EAT East Africa Time
# 4:00 EAST East Africa Summer Time (no longer used)
# Algeria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Algeria 1916 only - Jun 14 23:00s 1:00 S
Rule Algeria 1916 1919 - Oct Sun<=7 23:00s 0 -
Rule Algeria 1917 only - Mar 24 23:00s 1:00 S
Rule Algeria 1918 only - Mar 9 23:00s 1:00 S
Rule Algeria 1919 only - Mar 1 23:00s 1:00 S
Rule Algeria 1920 only - Feb 14 23:00s 1:00 S
Rule Algeria 1920 only - Oct 23 23:00s 0 -
Rule Algeria 1921 only - Mar 14 23:00s 1:00 S
Rule Algeria 1921 only - Jun 21 23:00s 0 -
Rule Algeria 1939 only - Sep 11 23:00s 1:00 S
Rule Algeria 1939 only - Nov 19 1:00 0 -
Rule Algeria 1944 1945 - Apr Mon<=7 2:00 1:00 S
Rule Algeria 1944 only - Oct 8 2:00 0 -
Rule Algeria 1945 only - Sep 16 1:00 0 -
Rule Algeria 1971 only - Apr 25 23:00s 1:00 S
Rule Algeria 1971 only - Sep 26 23:00s 0 -
Rule Algeria 1977 only - May 6 0:00 1:00 S
Rule Algeria 1977 only - Oct 21 0:00 0 -
Rule Algeria 1978 only - Mar 24 1:00 1:00 S
Rule Algeria 1978 only - Sep 22 3:00 0 -
Rule Algeria 1980 only - Apr 25 0:00 1:00 S
Rule Algeria 1980 only - Oct 31 2:00 0 -
# Shanks gives 0:09 for Paris Mean Time; go with Howse's more precise 0:09:21.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:01
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
0:00 Algeria WE%sT 1940 Feb 25 2:00
1:00 Algeria CE%sT 1946 Oct 7
0:00 - WET 1956 Jan 29
1:00 - CET 1963 Apr 14
0:00 Algeria WE%sT 1977 Oct 21
1:00 Algeria CE%sT 1979 Oct 26
0:00 Algeria WE%sT 1981 May
1:00 - CET
# Angola
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Luanda 0:52:56 - LMT 1892
0:52:04 - LMT 1911 May 26 # Luanda Mean Time?
1:00 - WAT
# Benin
# Whitman says they switched to 1:00 in 1946, not 1934; go with Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Porto-Novo 0:10:28 - LMT 1912
0:00 - GMT 1934 Feb 26
1:00 - WAT
# Botswana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Gaborone 1:43:40 - LMT 1885
2:00 - CAT 1943 Sep 19 2:00
2:00 1:00 CAST 1944 Mar 19 2:00
2:00 - CAT
# Burkina Faso
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ouagadougou -0:06:04 - LMT 1912
0:00 - GMT
# Burundi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bujumbura 1:57:28 - LMT 1890
2:00 - CAT
# Cameroon
# Whitman says they switched to 1:00 in 1920; go with Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Douala 0:38:48 - LMT 1912
1:00 - WAT
# Cape Verde
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Cape_Verde -1:34:04 - LMT 1907 # Praia
-2:00 - CVT 1942 Sep
-2:00 1:00 CVST 1945 Oct 15
-2:00 - CVT 1975 Nov 25 2:00
-1:00 - CVT
# Central African Republic
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bangui 1:14:20 - LMT 1912
1:00 - WAT
# Chad
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Ndjamena 1:00:12 - LMT 1912
1:00 - WAT 1979 Oct 14
1:00 1:00 WAST 1980 Mar 8
1:00 - WAT
# Comoros
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Comoro 2:53:04 - LMT 1911 Jul # Moroni, Gran Comoro
3:00 - EAT
# Democratic Republic of Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kinshasa 1:01:12 - LMT 1897 Nov 9
1:00 - WAT
Zone Africa/Lubumbashi 1:49:52 - LMT 1897 Nov 9
2:00 - CAT
# Republic of the Congo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Brazzaville 1:01:08 - LMT 1912
1:00 - WAT
# Cote D'Ivoire
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Abidjan -0:16:08 - LMT 1912
0:00 - GMT
# Djibouti
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Djibouti 2:52:36 - LMT 1911 Jul
3:00 - EAT
###############################################################################
# Egypt
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Egypt 1940 only - Jul 15 0:00 1:00 S
Rule Egypt 1940 only - Oct 1 0:00 0 -
Rule Egypt 1941 only - Apr 15 0:00 1:00 S
Rule Egypt 1941 only - Sep 16 0:00 0 -
Rule Egypt 1942 1944 - Apr 1 0:00 1:00 S
Rule Egypt 1942 only - Oct 27 0:00 0 -
Rule Egypt 1943 1945 - Nov 1 0:00 0 -
Rule Egypt 1945 only - Apr 16 0:00 1:00 S
Rule Egypt 1957 only - May 10 0:00 1:00 S
Rule Egypt 1957 1958 - Oct 1 0:00 0 -
Rule Egypt 1958 only - May 1 0:00 1:00 S
Rule Egypt 1959 1981 - May 1 1:00 1:00 S
Rule Egypt 1959 1965 - Sep 30 3:00 0 -
Rule Egypt 1966 1994 - Oct 1 3:00 0 -
Rule Egypt 1982 only - Jul 25 1:00 1:00 S
Rule Egypt 1983 only - Jul 12 1:00 1:00 S
Rule Egypt 1984 1988 - May 1 1:00 1:00 S
Rule Egypt 1989 only - May 6 1:00 1:00 S
Rule Egypt 1990 1994 - May 1 1:00 1:00 S
# IATA (after 1990) says transitions are at 0:00.
# Go with IATA starting in 1995, except correct 1995 entry from 09-30 to 09-29.
Rule Egypt 1995 max - Apr lastFri 0:00s 1:00 S
Rule Egypt 1995 max - Sep lastThu 23:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Cairo 2:05:00 - LMT 1900 Oct
2:00 Egypt EE%sT
# Equatorial Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Malabo 0:35:08 - LMT 1912
0:00 - GMT 1963 Dec 15
1:00 - WAT
# Eritrea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Asmera 2:35:32 - LMT 1870
2:35:32 - AMT 1890 # Asmera Mean Time
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Ethiopia
# From Paul Eggert (1997-10-05):
# Shanks writes that Ethiopia had six narrowly-spaced time zones between
# 1870 and 1890, and that they merged to 38E50 (2:35:20) in 1890.
# We'll guess that 38E50 is for Adis Dera.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Addis_Ababa 2:34:48 - LMT 1870
2:35:20 - ADMT 1936 May 5 # Adis Dera MT
3:00 - EAT
# Gabon
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Libreville 0:37:48 - LMT 1912
1:00 - WAT
# Gambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Banjul -1:06:36 - LMT 1912
-1:06:36 - BMT 1935 # Banjul Mean Time
-1:00 - WAT 1964
0:00 - GMT
# Ghana
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman says DST was observed from 1931 to ``the present''; go with Shanks.
Rule Ghana 1936 1942 - Sep 1 0:00 0:20 GHST
Rule Ghana 1936 1942 - Dec 31 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Accra -0:00:52 - LMT 1918
0:00 Ghana %s
# Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Conakry -0:54:52 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960
0:00 - GMT
# Guinea-Bissau
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bissau -1:02:20 - LMT 1911 May 26
-1:00 - WAT 1975
0:00 - GMT
# Kenya
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nairobi 2:27:16 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1940
2:45 - BEAUT 1960
3:00 - EAT
# Lesotho
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maseru 1:50:00 - LMT 1903 Mar
2:00 - SAST 1943 Sep 19 2:00
2:00 1:00 SAST 1944 Mar 19 2:00
2:00 - SAST
# Liberia
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# In 1972 Liberia was the last country to switch
# from a UTC offset that was not a multiple of 15 minutes.
# Howse reports that it was in honor of their president's birthday.
# Shanks reports the date as May 1, whereas Howse reports Jan; go with Shanks.
# For Liberia before 1972, Shanks reports -0:44, whereas Howse and Whitman
# each report -0:44:30; go with the more precise figure.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Monrovia -0:43:08 - LMT 1882
-0:43:08 - MMT 1919 Mar # Monrovia Mean Time
-0:44:30 - LRT 1972 May # Liberia Time
0:00 - GMT
###############################################################################
# Libya
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Libya 1951 only - Oct 14 2:00 1:00 S
Rule Libya 1952 only - Jan 1 0:00 0 -
Rule Libya 1953 only - Oct 9 2:00 1:00 S
Rule Libya 1954 only - Jan 1 0:00 0 -
Rule Libya 1955 only - Sep 30 0:00 1:00 S
Rule Libya 1956 only - Jan 1 0:00 0 -
Rule Libya 1982 1984 - Apr 1 0:00 1:00 S
Rule Libya 1982 1985 - Oct 1 0:00 0 -
Rule Libya 1985 only - Apr 6 0:00 1:00 S
Rule Libya 1986 only - Apr 4 0:00 1:00 S
Rule Libya 1986 only - Oct 3 0:00 0 -
Rule Libya 1987 1989 - Apr 1 0:00 1:00 S
Rule Libya 1987 1990 - Oct 1 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tripoli 0:52:44 - LMT 1920
1:00 Libya CE%sT 1959
2:00 - EET 1982
1:00 Libya CE%sT 1990 May 4
# The following entries are all from Shanks;
# the IATA SSIM data contain some obvious errors.
2:00 - EET 1996 Sep 30
1:00 - CET 1997 Apr 4
1:00 1:00 CEST 1997 Oct 4
2:00 - EET
# Madagascar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Antananarivo 3:10:04 - LMT 1911 Jul
3:00 - EAT 1954 Feb 27 23:00s
3:00 1:00 EAST 1954 May 29 23:00s
3:00 - EAT
# Malawi
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Blantyre 2:20:00 - LMT 1903 Mar
2:00 - CAT
# Mali
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Bamako -0:32:00 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Jun 20
0:00 - GMT
# no longer different from Bamako, but too famous to omit
Zone Africa/Timbuktu -0:12:04 - LMT 1912
0:00 - GMT
# Mauritania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Nouakchott -1:03:48 - LMT 1912
0:00 - GMT 1934 Feb 26
-1:00 - WAT 1960 Nov 28
0:00 - GMT
# Mauritius
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mauritius 3:50:00 - LMT 1907 # Port Louis
4:00 - MUT # Mauritius Time
# Agalega Is, Rodriguez
# no information; probably like Indian/Mauritius
# Mayotte
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mayotte 3:00:56 - LMT 1911 Jul # Mamoutzou
3:00 - EAT
# Morocco
# See the `europe' file for Spanish Morocco (Africa/Ceuta).
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Morocco 1939 only - Sep 12 0:00 1:00 S
Rule Morocco 1939 only - Nov 19 0:00 0 -
Rule Morocco 1940 only - Feb 25 0:00 1:00 S
Rule Morocco 1945 only - Nov 18 0:00 0 -
Rule Morocco 1950 only - Jun 11 0:00 1:00 S
Rule Morocco 1950 only - Oct 29 0:00 0 -
Rule Morocco 1967 only - Jun 3 12:00 1:00 S
Rule Morocco 1967 only - Oct 1 0:00 0 -
Rule Morocco 1974 only - Jun 24 0:00 1:00 S
Rule Morocco 1974 only - Sep 1 0:00 0 -
Rule Morocco 1976 1977 - May 1 0:00 1:00 S
Rule Morocco 1976 only - Aug 1 0:00 0 -
Rule Morocco 1977 only - Sep 28 0:00 0 -
Rule Morocco 1978 only - Jun 1 0:00 1:00 S
Rule Morocco 1978 only - Aug 4 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Casablanca -0:30:20 - LMT 1913 Oct 26
0:00 Morocco WE%sT 1984 Mar 16
1:00 - CET 1986
0:00 - WET
# Western Sahara
Zone Africa/El_Aaiun -0:52:48 - LMT 1934 Jan
-1:00 - WAT 1976 Apr 14
0:00 - WET
# Mozambique
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Maputo 2:10:20 - LMT 1903 Mar
2:00 - CAT
# Namibia
# The 1994-04-03 transition is from Shanks.
# Shanks reports no DST after 1998-04; go with IATA.
# RULE NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Namibia 1994 max - Sep Sun>=1 2:00 1:00 S
Rule Namibia 1995 max - Apr Sun>=1 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Windhoek 1:08:24 - LMT 1892 Feb 8
1:30 - SWAT 1903 Mar # SW Africa Time
2:00 - SAST 1942 Sep 20 2:00
2:00 1:00 SAST 1943 Mar 21 2:00
2:00 - SAST 1990 Mar 21 # independence
2:00 - CAT 1994 Apr 3
1:00 Namibia WA%sT
# Niger
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Niamey 0:08:28 - LMT 1912
-1:00 - WAT 1934 Feb 26
0:00 - GMT 1960
1:00 - WAT
# Nigeria
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lagos 0:13:36 - LMT 1919 Sep
1:00 - WAT
# Reunion
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Reunion 3:41:52 - LMT 1911 Jun # Saint-Denis
4:00 - RET # Reunion Time
#
# Scattered Islands (Iles Eparses) administered from Reunion are as follows.
# The following information about them is taken from
# Iles Eparses (www.outre-mer.gouv.fr/domtom/ile.htm, 1997-07-22, in French;
# no longer available as of 1999-08-17).
# We have no info about their time zone histories.
#
# Bassas da India - uninhabited
# Europa Island - inhabited from 1905 to 1910 by two families
# Glorioso Is - inhabited until at least 1958
# Juan de Nova - uninhabited
# Tromelin - inhabited until at least 1958
# Rwanda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kigali 2:00:16 - LMT 1935 Jun
2:00 - CAT
# St Helena
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/St_Helena -0:22:48 - LMT 1890 # Jamestown
-0:22:48 - JMT 1951 # Jamestown Mean Time
0:00 - GMT
# The other parts of the St Helena territory are similar:
# Tristan da Cunha: on GMT, say Whitman and the CIA
# Ascension: on GMT, says usno1995 and the CIA
# Gough (scientific station since 1955; sealers wintered previously):
# on GMT, says the CIA
# Inaccessible, Nightingale: no information, but probably GMT
# Sao Tome and Principe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Sao_Tome 0:26:56 - LMT 1884
-0:36:32 - LMT 1912 # Lisbon Mean Time
0:00 - GMT
# Senegal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dakar -1:09:44 - LMT 1912
-1:00 - WAT 1941 Jun
0:00 - GMT
# Seychelles
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Mahe 3:41:48 - LMT 1906 Jun # Victoria
4:00 - SCT # Seychelles Time
# Sierra Leone
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives Mar 31 - Aug 31 for 1931 on; go with Shanks.
Rule SL 1935 1942 - Jun 1 0:00 0:40 SLST
Rule SL 1935 1942 - Oct 1 0:00 0 WAT
Rule SL 1957 1962 - Jun 1 0:00 1:00 SLST
Rule SL 1957 1962 - Sep 1 0:00 0 GMT
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Freetown -0:53:00 - LMT 1882
-0:53:00 - FMT 1913 Jun # Freetown Mean Time
-1:00 SL %s 1957
0:00 SL %s
# Somalia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mogadishu 3:01:28 - LMT 1893 Nov
3:00 - EAT 1931
2:30 - BEAT 1957
3:00 - EAT
# South Africa
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule SA 1942 1943 - Sep Sun>=15 2:00 1:00 -
Rule SA 1943 1944 - Mar Sun>=15 2:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Johannesburg 1:52:00 - LMT 1892 Feb 8
1:30 - SAST 1903 Mar
2:00 SA SAST
# Marion and Prince Edward Is
# scientific station since 1947
# no information
# Sudan
# From Michael Ross <mross@antigone.com> (1995-11-15):
# Sudan no longer observes any form of daylight time change.
# I verified this today by telephone with the Sudan Mission to the
# United Nations: 212-573-6033
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Sudan 1970 only - May 1 0:00 1:00 S
Rule Sudan 1970 1985 - Oct 15 0:00 0 -
Rule Sudan 1971 only - Apr 30 0:00 1:00 S
Rule Sudan 1972 1985 - Apr lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Khartoum 2:10:08 - LMT 1931
2:00 Sudan CA%sT
# Swaziland
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Mbabane 2:04:24 - LMT 1903 Mar
2:00 - SAST
# Tanzania
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Dar_es_Salaam 2:37:08 - LMT 1931
3:00 - EAT 1948
2:45 - BEAUT 1961
3:00 - EAT
# Togo
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lome 0:04:52 - LMT 1893
0:00 - GMT
# Tunisia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tunisia 1939 only - Apr 15 23:00s 1:00 S
Rule Tunisia 1939 only - Nov 18 23:00s 0 -
Rule Tunisia 1940 only - Feb 25 23:00s 1:00 S
Rule Tunisia 1941 only - Oct 6 0:00 0 -
Rule Tunisia 1942 only - Mar 9 0:00 1:00 S
Rule Tunisia 1942 only - Nov 2 3:00 0 -
Rule Tunisia 1943 only - Mar 29 2:00 1:00 S
Rule Tunisia 1943 only - Apr 17 2:00 0 -
Rule Tunisia 1943 only - Apr 25 2:00 1:00 S
Rule Tunisia 1943 only - Oct 4 2:00 0 -
Rule Tunisia 1944 1945 - Apr Mon>=1 2:00 1:00 S
Rule Tunisia 1944 only - Oct 8 0:00 0 -
Rule Tunisia 1945 only - Sep 16 0:00 0 -
Rule Tunisia 1977 only - Apr 30 0:00s 1:00 S
Rule Tunisia 1977 only - Sep 24 0:00s 0 -
Rule Tunisia 1978 only - May 1 0:00s 1:00 S
Rule Tunisia 1978 only - Oct 1 0:00s 0 -
Rule Tunisia 1988 only - Jun 1 0:00s 1:00 S
Rule Tunisia 1988 1990 - Sep lastSun 0:00s 0 -
Rule Tunisia 1989 only - Mar 26 0:00s 1:00 S
Rule Tunisia 1990 only - May 1 0:00s 1:00 S
# Shanks gives 0:09 for Paris Mean Time; go with Howse's more precise 0:09:21.
# Shanks says the 1911 switch occurred on Mar 9; go with Howse's Mar 11.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Tunis 0:40:44 - LMT 1881 May 12
0:09:21 - PMT 1911 Mar 11 # Paris Mean Time
1:00 Tunisia CE%sT
# Uganda
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Kampala 2:09:40 - LMT 1928 Jul
3:00 - EAT 1930
2:30 - BEAT 1948
2:45 - BEAUT 1957
3:00 - EAT
# Zambia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Lusaka 1:53:08 - LMT 1903 Mar
2:00 - CAT
# Zimbabwe
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Africa/Harare 2:04:12 - LMT 1903 Mar
2:00 - CAT
|
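The `Rule' and `Zone' tables in both columns above are tzdata source: each Zone continuation line carries a UT offset (GMTOFF), the Rule set or fixed amount of saved time (RULES), the abbreviation format with `%s' filled from the matching rule's LETTER column (FORMAT), and an optional UNTIL field saying when the next line takes over. Programs never read this text directly; it is first compiled by zic(8) into binary zoneinfo files. The following is a minimal sketch of consuming one of the zones defined above, assuming the compiled files are installed in the system's default zoneinfo directory:

```c
/* Illustrative sketch: select a zone from the tables above via TZ and
   print the local time and abbreviation derived from the compiled data.  */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main (void)
{
  /* Africa/Algiers is defined in the africa file shown above.  */
  setenv ("TZ", "Africa/Algiers", 1);
  tzset ();

  time_t now = time (NULL);
  struct tm *tm = localtime (&now);
  char buf[64];
  if (tm != NULL && strftime (buf, sizeof buf, "%Y-%m-%d %H:%M:%S %Z", tm) > 0)
    printf ("%s\n", buf);   /* %Z prints the abbreviation built from FORMAT */
  return 0;
}
```

Running `zdump -v Africa/Algiers` against the compiled data is the usual way to spot-check the transitions encoded by the Rule lines above.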
79,361,839,694,850 | # @(#)asia 7.48
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (4th edition),
# San Diego: ACS Publications, Inc. (1995).
# Except where otherwise noted, it is the source for the data below.
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 2:00 EET EEST Eastern European Time
# 2:00 IST IDT Israel
# 3:00 AST ADT Arabia*
# 4:00 GST Gulf*
# 5:30 IST India
# 7:00 ICT Indochina*
# 8:00 CST China
# 9:00 CJT Central Japanese Time (1896/1937)*
# 9:00 JST Japan
# 9:00 KST Korea
# 9:30 CST (Australian) Central Standard Time
#
# See the `europe' file for Russia and Turkey in Asia.
# From Guy Harris:
# Incorporates data for Singapore from Robert Elz' asia 1.1, as well as
# additional information from Tom Yap, Sun Microsystems Intercontinental
# Technical Support (including a page from the Official Airline Guide -
# Worldwide Edition). The names for time zones are guesses.
###############################################################################
# These rules are stolen from the `europe' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EUAsia 1981 max - Mar lastSun 1:00u 1:00 S
Rule EUAsia 1996 max - Oct lastSun 1:00u 0 -
Rule E-EurAsia 1981 max - Mar lastSun 0:00 1:00 S
Rule E-EurAsia 1979 1995 - Sep lastSun 0:00 0 -
Rule E-EurAsia 1996 max - Oct lastSun 0:00 0 -
Rule RussiaAsia 1981 1984 - Apr 1 0:00 1:00 S
Rule RussiaAsia 1981 1983 - Oct 1 0:00 0 -
Rule RussiaAsia 1984 1991 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1985 1991 - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1992 only - Mar lastSat 23:00 1:00 S
Rule RussiaAsia 1992 only - Sep lastSat 23:00 0 -
Rule RussiaAsia 1993 max - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1993 1995 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1996 max - Oct lastSun 2:00s 0 -
# Afghanistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kabul 4:36:48 - LMT 1890
4:00 - AFT 1945
4:30 - AFT
# Armenia
# From Paul Eggert (1999-09-27):
# Shanks has Yerevan switching to 3:00 (with Russian DST) in spring 1991,
# but usno1995 has Armenia at 4:00 (with DST), and Edgar Der-Danieliantz
# <edd@AIC.NET> reported (1996-05-04) that Yerevan probably wouldn't use DST
# in 1996, though it did use DST in 1995. IATA SSIM (1991/1998) reports that
# Armenia switched from 3:00 to 4:00 in 1998 and observed DST after 1991,
# but started switching at 3:00s in 1998. IATA SSIM (1999-02) reports
# that they switch one day later in 2001 (i.e. on Mondays).
# What a mess! We guess Yerevan DST stayed in sync with Moscow between 1990
# and 1995, did not use DST in 1996, and started using DST again in 1997.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Yerevan 2:58:00 - LMT 1924 May 2
3:00 - YERT 1957 Mar # Yerevan Time
4:00 RussiaAsia YER%sT 1991 Mar 31 2:00s
3:00 1:00 YERST 1991 Sep 23 # independence
3:00 1:00 AMST 1991 Sep 29 2:00s # Armenia Time
3:00 - AMT 1992 Jan 19 2:00s
4:00 RussiaAsia AM%sT 1996
4:00 - AMT 1997
4:00 RussiaAsia AM%sT
# Azerbaijan
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Azer 1997 max - Mar lastSun 1:00 1:00 S
Rule Azer 1997 max - Oct lastSun 1:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baku 3:19:24 - LMT 1924 May 2
3:00 - BAKT 1957 Mar # Baku Time
4:00 RussiaAsia BAK%sT 1991 Mar 31 2:00s
3:00 1:00 BAKST 1991 Aug 30 # independence
3:00 RussiaAsia AZ%sT 1992 Sep lastSun 2:00s
4:00 - AZT 1996 # Azerbaijan time
4:00 EUAsia AZ%sT 1997
4:00 Azer AZ%sT
# Bahrain
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bahrain 3:22:20 - LMT 1920 # Al-Manamah
4:00 - GST 1972 Jun
3:00 - AST
# Bangladesh
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dacca 6:01:40 - LMT 1890
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
6:30 - BURT 1951 Sep 30
6:00 - DACT 1971 Mar 26 # Dacca Time
6:00 - BDT # Bangladesh Time
# Bhutan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Thimbu 5:58:36 - LMT 1947 Aug 15
5:30 - IST 1987 Oct
6:00 - BTT # Bhutan Time
# British Indian Ocean Territory
# From Whitman:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Chagos 5:00 - IOT # BIOT Time
# Brunei
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Brunei 7:39:40 - LMT 1926 Mar # Bandar Seri Begawan
7:30 - BNT 1933
8:00 - BNT
# Burma / Myanmar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Rangoon 6:24:40 - LMT 1880 # or Yangon
6:24:36 - RMT 1920 # Rangoon Mean Time?
6:30 - BURT 1942 May # Burma Time
9:00 - JST 1945 May 3
6:30 - MMT # Myanmar Time
# Cambodia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Phnom_Penh 6:59:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# China
# From Guy Harris:
# People's Republic of China. Yes, they really have only one time zone.
# From Bob Devine (1988-01-28):
# No they don't. See TIME mag, 1986-02-17 p.52. Even though
# China is across 4 physical time zones, before Feb 1, 1986 only the
# Peking (Beijing) time zone was recognized. Since that date, China
# has two of 'em -- Peking's and Urumqi (named after the capital of
# the Xinjiang Uighur Autonomous Region). I don't know about DST for it.
#
# . . .I just deleted the DST table and this editor makes it too
# painful to suck in another copy.. So, here is what I have for
# DST start/end dates for Peking's time zone (info from AP):
#
# 1986 May 4 - Sept 14
# 1987 mid-April - ??
# From U. S. Naval Observatory (1989-01-19):
# CHINA 8 H AHEAD OF UTC ALL OF CHINA, INCL TAIWAN
# CHINA 9 H AHEAD OF UTC APR 17 - SEP 10
# From Paul Eggert <eggert@twinsun.com> (1995-12-19):
# Shanks writes that China has had a single time zone since 1980 May 1,
# observing summer DST from 1986 through 1991; this contradicts Devine's
# note about Time magazine, though apparently _something_ happened in 1986.
# Go with Shanks for now. I made up names for the other pre-1980 time zones.
# From Shanks (1995):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Shang 1940 only - Jun 3 0:00 1:00 D
Rule Shang 1940 1941 - Oct 1 0:00 0 S
Rule Shang 1941 only - Mar 16 0:00 1:00 D
Rule PRC 1949 only - Jan 1 0:00 0 S
Rule PRC 1986 only - May 4 0:00 1:00 D
Rule PRC 1986 1991 - Sep Sun>=11 0:00 0 S
Rule PRC 1987 1991 - Apr Sun>=10 0:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Harbin 8:26:44 - LMT 1928
8:30 - HART 1932 Mar # Harbin Time
8:00 - CST 1940
9:00 - HART 1966 May
8:30 - HART 1980 May
8:00 PRC C%sT
Zone Asia/Shanghai 8:05:52 - LMT 1928
8:00 Shang C%sT 1949
8:00 PRC C%sT
Zone Asia/Chungking 7:06:20 - LMT 1928
7:00 - CHUT 1980 May # Chungking Time
8:00 PRC C%sT
Zone Asia/Urumqi 5:50:20 - LMT 1928
6:00 - URUT 1980 May # Urumqi Time
8:00 PRC C%sT
Zone Asia/Kashgar 5:03:56 - LMT 1928
5:30 - KAST 1940 # Kashgar Time
5:00 - KAST 1980 May
8:00 PRC C%sT
# Hong Kong
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule HK 1946 only - Apr 20 3:30 1:00 S
Rule HK 1946 only - Dec 1 3:30 0 -
Rule HK 1947 only - Apr 13 3:30 1:00 S
Rule HK 1947 only - Dec 30 3:30 0 -
Rule HK 1948 only - May 2 3:30 1:00 S
Rule HK 1948 1952 - Oct lastSun 3:30 0 -
Rule HK 1949 1953 - Apr Sun>=1 3:30 1:00 S
Rule HK 1953 only - Nov 1 3:30 0 -
Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S
Rule HK 1954 only - Oct 31 3:30 0 -
Rule HK 1955 1964 - Nov Sun>=1 3:30 0 -
Rule HK 1965 1977 - Apr Sun>=16 3:30 1:00 S
Rule HK 1965 1977 - Oct Sun>=16 3:30 0 -
Rule HK 1979 1980 - May Sun>=8 3:30 1:00 S
Rule HK 1979 1980 - Oct Sun>=16 3:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Hong_Kong 7:36:36 - LMT 1904 Oct 30
8:00 HK HK%sT 1997 Jul 1 # return to China
8:00 PRC C%sT
###############################################################################
# Taiwan
# Shanks (1995) writes that Taiwan observed DST during 1945, when it
# was still controlled by Japan. This is hard to believe, but we don't
# have any other information.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Taiwan 1945 1951 - May 1 0:00 1:00 D
Rule Taiwan 1945 1951 - Oct 1 0:00 0 S
Rule Taiwan 1952 only - Mar 1 0:00 1:00 D
Rule Taiwan 1952 1954 - Nov 1 0:00 0 S
Rule Taiwan 1953 1959 - Apr 1 0:00 1:00 D
Rule Taiwan 1955 1961 - Oct 1 0:00 0 S
Rule Taiwan 1960 1961 - Jun 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Apr 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Oct 1 0:00 0 S
Rule Taiwan 1980 only - Jun 30 0:00 1:00 D
Rule Taiwan 1980 only - Sep 30 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Taipei 8:06:00 - LMT 1896
8:00 Taiwan C%sT
# Macao
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Macao 1961 1962 - Mar Sun>=16 3:30 1:00 S
Rule Macao 1961 1964 - Nov Sun>=1 3:30 0 -
Rule Macao 1963 only - Mar Sun>=16 0:00 1:00 S
Rule Macao 1964 only - Mar Sun>=16 3:30 1:00 S
Rule Macao 1965 only - Mar Sun>=16 0:00 1:00 S
Rule Macao 1965 only - Oct 31 0:00 0 -
Rule Macao 1966 1971 - Apr Sun>=16 3:30 1:00 S
Rule Macao 1966 1971 - Oct Sun>=16 3:30 0 -
Rule Macao 1972 1974 - Apr Sun>=15 0:00 1:00 S
Rule Macao 1972 1973 - Oct Sun>=15 0:00 0 -
Rule Macao 1974 1977 - Oct Sun>=15 3:30 0 -
Rule Macao 1975 1977 - Apr Sun>=15 3:30 1:00 S
Rule Macao 1978 1980 - Apr Sun>=15 0:00 1:00 S
Rule Macao 1978 1980 - Oct Sun>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Macao 7:34:20 - LMT 1912
8:00 Macao MO%sT 1999 Dec 20 # return to China
8:00 PRC C%sT
###############################################################################
# Cyprus
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cyprus 1975 only - Apr 13 0:00 1:00 S
Rule Cyprus 1975 only - Oct 12 0:00 0 -
Rule Cyprus 1976 only - May 15 0:00 1:00 S
Rule Cyprus 1976 only - Oct 11 0:00 0 -
Rule Cyprus 1977 1980 - Apr Sun>=1 0:00 1:00 S
Rule Cyprus 1977 only - Sep 25 0:00 0 -
Rule Cyprus 1978 only - Oct 2 0:00 0 -
Rule Cyprus 1979 1997 - Sep lastSun 0:00 0 -
Rule Cyprus 1981 1998 - Mar lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Nicosia 2:13:28 - LMT 1921 Nov 14
2:00 Cyprus EE%sT 1998 Sep
2:00 EUAsia EE%sT
# IATA SSIM (1998-09) has Cyprus using EU rules for the first time.
# Georgia
# From Paul Eggert <eggert@twinsun.com> (1994-11-19):
# Today's _Economist_ (p 60) reports that Georgia moved its clocks forward
# an hour recently, due to a law proposed by Zurab Murvanidze,
# an MP who went on a hunger strike for 11 days to force discussion about it!
# We have no details, but we'll guess they didn't move the clocks back in fall.
#
# From Mathew Englander <mathew@io.org>, quoting AP (1996-10-23 13:05-04):
# Instead of putting back clocks at the end of October, Georgia
# will stay on daylight savings time this winter to save energy,
# President Eduard Shevardnadze decreed Wednesday.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tbilisi 2:59:16 - LMT 1880
2:59:16 - TBMT 1924 May 2 # Tbilisi Mean Time
3:00 - TBIT 1957 Mar # Tbilisi Time
4:00 RussiaAsia TBI%sT 1991 Mar 31 2:00s
3:00 1:00 TBIST 1991 Apr 9 # independence
3:00 RussiaAsia GE%sT 1992 # Georgia Time
3:00 E-EurAsia GE%sT 1994 Sep lastSun
4:00 E-EurAsia GE%sT 1996 Oct lastSun
4:00 1:00 GEST 1997 Mar lastSun
4:00 E-EurAsia GE%sT
# India
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Calcutta 5:53:28 - LMT 1880
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST
# The following are like Asia/Calcutta:
# Andaman Is
# Lakshadweep (Laccadive, Minicoy and Amindivi Is)
# Nicobar Is
# Indonesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jakarta 7:07:12 - LMT 1867 Aug 10
7:07:12 - JMT 1924 Jan 1 0:13 # Jakarta MT
7:20 - JAVT 1932 Nov # Java Time
7:30 - JAVT 1942 Mar 23
9:00 - JST 1945 Aug
7:30 - JAVT 1948 May
8:00 - JAVT 1950 May
7:30 - JAVT 1964
7:00 - JAVT
Zone Asia/Ujung_Pandang 7:57:36 - LMT 1920
7:57:36 - MMT 1932 Nov # Macassar MT
8:00 - BORT 1942 Feb 9 # Borneo Time
9:00 - JST 1945 Aug
8:00 - BORT
Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
9:00 - JAYT 1944 # Jayapura Time
9:30 - CST 1964
9:00 - JAYT
# Iran
# From Paul Eggert (1999-09-27), following up a suggestion by Rich Wales:
# Ahmea Alavi in
# <a href="http://tehran.stanford.edu/Iran_Lib/Calendar/taghveem.txt">
# TAGHVEEM (1993-07-12)
# </a>
# writes ``Daylight saving time in Iran starts from the first day
# of Farvardin and ends the first day of Mehr.'' This disagrees with the SSIM:
#
# DST start DST end
# year SSIM Alavi SSIM Alavi
# 1991 05-03!= 03-21 09-20!= 09-23
# 1992 03-22!= 03-21 09-23 09-23
# 1993 03-21 03-21 09-23 09-23
# 1994 03-21 03-21 09-22!= 09-23
# 1995 03-21 03-21 09-22!= 09-23
# 1996 03-21!= 03-20 09-21!= 09-22
# 1997 03-21 03-21 09-21!= 09-23
# 1998 03-21 03-21 09-21!= 09-23
# 1999 03-22!= 03-21 09-22!= 09-23
# 2000 03-21!= 03-20 09-21!= 09-22
# 2001 03-17!= 03-21 09-19!= 09-23
#
# Go with Alavi starting with 1992.
# I used Ed Reingold's cal-persia in GNU Emacs 19.34 to compute Persian dates.
# The Persian calendar is based on the sun, and dates after around 2050
# are approximate; stop after 2037 when 32-bit time_t's overflow.
#
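# (A quick check on that cutoff, assuming the usual Unix epoch: a signed
# 32-bit time_t counts seconds from 1970-01-01 00:00:00 UTC and tops out at
# 2^31 - 1 = 2,147,483,647 seconds, i.e. 2038-01-19 03:14:07 UTC, so rules
# past 2037 could never be consulted on such systems anyway.)
#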
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iran 1978 1980 - Mar 21 0:00 1:00 S
Rule Iran 1978 only - Oct 21 0:00 0 -
Rule Iran 1979 only - Sep 19 0:00 0 -
Rule Iran 1980 only - Sep 23 0:00 0 -
Rule Iran 1991 only - May 3 0:00s 1:00 S
Rule Iran 1991 only - Sep 20 0:00s 0 -
Rule Iran 1992 1995 - Mar 21 0:00 1:00 S
Rule Iran 1992 1995 - Sep 23 0:00 0 -
Rule Iran 1996 only - Mar 20 0:00 1:00 S
Rule Iran 1996 only - Sep 22 0:00 0 -
Rule Iran 1997 1999 - Mar 21 0:00 1:00 S
Rule Iran 1997 1999 - Sep 23 0:00 0 -
Rule Iran 2000 only - Mar 20 0:00 1:00 S
Rule Iran 2000 only - Sep 22 0:00 0 -
Rule Iran 2001 2003 - Mar 21 0:00 1:00 S
Rule Iran 2001 2003 - Sep 23 0:00 0 -
Rule Iran 2004 only - Mar 20 0:00 1:00 S
Rule Iran 2004 only - Sep 22 0:00 0 -
Rule Iran 2005 2007 - Mar 21 0:00 1:00 S
Rule Iran 2005 2007 - Sep 23 0:00 0 -
Rule Iran 2008 only - Mar 20 0:00 1:00 S
Rule Iran 2008 only - Sep 22 0:00 0 -
Rule Iran 2009 2011 - Mar 21 0:00 1:00 S
Rule Iran 2009 2011 - Sep 23 0:00 0 -
Rule Iran 2012 only - Mar 20 0:00 1:00 S
Rule Iran 2012 only - Sep 22 0:00 0 -
Rule Iran 2013 2015 - Mar 21 0:00 1:00 S
Rule Iran 2013 2015 - Sep 23 0:00 0 -
Rule Iran 2016 only - Mar 20 0:00 1:00 S
Rule Iran 2016 only - Sep 22 0:00 0 -
Rule Iran 2017 2019 - Mar 21 0:00 1:00 S
Rule Iran 2017 2019 - Sep 23 0:00 0 -
Rule Iran 2020 only - Mar 20 0:00 1:00 S
Rule Iran 2020 only - Sep 22 0:00 0 -
Rule Iran 2021 2023 - Mar 21 0:00 1:00 S
Rule Iran 2021 2023 - Sep 23 0:00 0 -
Rule Iran 2024 2025 - Mar 20 0:00 1:00 S
Rule Iran 2024 2025 - Sep 22 0:00 0 -
Rule Iran 2026 2027 - Mar 21 0:00 1:00 S
Rule Iran 2026 2027 - Sep 23 0:00 0 -
Rule Iran 2028 2029 - Mar 20 0:00 1:00 S
Rule Iran 2028 2029 - Sep 22 0:00 0 -
Rule Iran 2030 2031 - Mar 21 0:00 1:00 S
Rule Iran 2030 2031 - Sep 23 0:00 0 -
Rule Iran 2032 2033 - Mar 20 0:00 1:00 S
Rule Iran 2032 2033 - Sep 22 0:00 0 -
Rule Iran 2034 2035 - Mar 21 0:00 1:00 S
Rule Iran 2034 2035 - Sep 23 0:00 0 -
Rule Iran 2036 2037 - Mar 20 0:00 1:00 S
Rule Iran 2036 2037 - Sep 22 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tehran 3:25:44 - LMT 1916
3:25:44 - TMT 1946 # Tehran Mean Time
3:30 - IRT 1977 Nov
4:00 Iran IR%sT 1979
3:30 Iran IR%sT
# Iraq
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iraq 1982 only - May 1 0:00 1:00 D
Rule Iraq 1982 1984 - Oct 1 0:00 0 S
Rule Iraq 1983 only - Mar 31 0:00 1:00 D
Rule Iraq 1984 1985 - Apr 1 0:00 1:00 D
Rule Iraq 1985 1990 - Sep lastSun 1:00s 0 S
Rule Iraq 1986 1990 - Mar lastSun 1:00s 1:00 D
# IATA SSIM (1991/1996) says Apr 1 12:01am UTC; guess the `:01' is a typo.
Rule Iraq 1991 max - Apr 1 3:00s 1:00 D
Rule Iraq 1991 max - Oct 1 3:00s 0 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baghdad 2:57:40 - LMT 1890
2:57:36 - BMT 1918 # Baghdad Mean Time?
3:00 - AST 1982 May
3:00 Iraq A%sT
###############################################################################
# Israel
# From U. S. Naval Observatory (1989-01-19):
# ISRAEL 2 H AHEAD OF UTC
# ISRAEL 3 H AHEAD OF UTC APR 10 - SEP 3
# From Shanks (1995):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1940 only - Jun 1 0:00 1:00 D
Rule Zion 1942 1944 - Nov 1 0:00 0 S
Rule Zion 1943 only - Apr 1 2:00 1:00 D
Rule Zion 1944 only - Apr 1 0:00 1:00 D
Rule Zion 1945 only - Apr 16 0:00 1:00 D
Rule Zion 1945 only - Nov 1 2:00 0 S
Rule Zion 1946 only - Apr 16 2:00 1:00 D
Rule Zion 1946 only - Nov 1 0:00 0 S
Rule Zion 1948 only - May 23 0:00 2:00 DD
Rule Zion 1948 only - Sep 1 0:00 1:00 D
Rule Zion 1948 1949 - Nov 1 2:00 0 S
Rule Zion 1949 only - May 1 0:00 1:00 D
Rule Zion 1950 only - Apr 16 0:00 1:00 D
Rule Zion 1950 only - Sep 15 3:00 0 S
Rule Zion 1951 only - Apr 1 0:00 1:00 D
Rule Zion 1951 only - Nov 11 3:00 0 S
Rule Zion 1952 only - Apr 20 2:00 1:00 D
Rule Zion 1952 only - Oct 19 3:00 0 S
Rule Zion 1953 only - Apr 12 2:00 1:00 D
Rule Zion 1953 only - Sep 13 3:00 0 S
Rule Zion 1954 only - Jun 13 0:00 1:00 D
Rule Zion 1954 only - Sep 12 0:00 0 S
Rule Zion 1955 only - Jun 11 2:00 1:00 D
Rule Zion 1955 only - Sep 11 0:00 0 S
Rule Zion 1956 only - Jun 3 0:00 1:00 D
Rule Zion 1956 only - Sep 30 3:00 0 S
Rule Zion 1957 only - Apr 29 2:00 1:00 D
Rule Zion 1957 only - Sep 22 0:00 0 S
Rule Zion 1974 only - Jul 7 0:00 1:00 D
Rule Zion 1974 only - Oct 13 0:00 0 S
Rule Zion 1975 only - Apr 20 0:00 1:00 D
Rule Zion 1975 only - Aug 31 0:00 0 S
Rule Zion 1985 only - Apr 14 0:00 1:00 D
Rule Zion 1985 only - Sep 15 0:00 0 S
Rule Zion 1986 only - May 18 0:00 1:00 D
Rule Zion 1986 only - Sep 7 0:00 0 S
Rule Zion 1987 only - Apr 15 0:00 1:00 D
Rule Zion 1987 only - Sep 13 0:00 0 S
Rule Zion 1988 only - Apr 9 0:00 1:00 D
Rule Zion 1988 only - Sep 3 0:00 0 S
# From Ephraim Silverberg <ephraim@cs.huji.ac.il>
# (1997-03-04, 1998-03-16 and 1998-12-28):
# According to the Office of the Secretary General of the Ministry of
# Interior, there is NO set rule for Daylight-Savings/Standard time changes.
# One thing is entrenched in law, however: that there must be at least 150
# days of daylight savings time annually. From 1993-1998, the change to
# daylight savings time was on a Friday morning from midnight IST to
# 1 a.m IDT; up until 1998, the change back to standard time was on a
# Saturday night from midnight daylight savings time to 11 p.m. standard
# time. 1996 is an exception to this rule where the change back to standard
# time took place on Sunday night instead of Saturday night to avoid
# conflicts with the Jewish New Year. Starting in 1999, the change to
# daylight savings time will still be on a Friday morning but from
# 2 a.m. IST to 3 a.m. IDT; furthermore, the change back to standard time
# will now also be on a Friday morning from 2 a.m. IDT to 1 a.m. IST.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1989 only - Apr 30 0:00 1:00 D
Rule Zion 1989 only - Sep 3 0:00 0 S
Rule Zion 1990 only - Mar 25 0:00 1:00 D
Rule Zion 1990 only - Aug 26 0:00 0 S
Rule Zion 1991 only - Mar 24 0:00 1:00 D
Rule Zion 1991 only - Sep 1 0:00 0 S
Rule Zion 1992 only - Mar 29 0:00 1:00 D
Rule Zion 1992 only - Sep 6 0:00 0 S
Rule Zion 1993 only - Apr 2 0:00 1:00 D
Rule Zion 1993 only - Sep 5 0:00 0 S
# The dates for 1994-1995 were obtained from Office of the Spokeswoman for the
# Ministry of Interior, Jerusalem, Israel. The spokeswoman can be reached by
# calling the office directly at 972-2-6701447 or 972-2-6701448.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1994 only - Apr 1 0:00 1:00 D
Rule Zion 1994 only - Aug 28 0:00 0 S
Rule Zion 1995 only - Mar 31 0:00 1:00 D
Rule Zion 1995 only - Sep 3 0:00 0 S
# The dates for 1996 were determined by the Minister of Interior of the
# time, Haim Ramon. The official announcement regarding 1996-1998
# (with the dates for 1997-1998 no longer being relevant) can be viewed at:
#
# ftp://ftp.huji.ac.il/pub/tz/announcements/1996-1998.ramon.ps.gz
#
# The dates for 1997-1998 were altered by his successor, Rabbi Eli Suissa.
#
# The official announcements for the years 1997-1999 can be viewed at:
#
# ftp://ftp.huji.ac.il/pub/tz/announcements/YYYY.ps.gz
#
# where YYYY is the relevant year.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1996 only - Mar 15 0:00 1:00 D
Rule Zion 1996 only - Sep 16 0:00 0 S
Rule Zion 1997 only - Mar 21 0:00 1:00 D
Rule Zion 1997 only - Sep 14 0:00 0 S
Rule Zion 1998 only - Mar 20 0:00 1:00 D
Rule Zion 1998 only - Sep 6 0:00 0 S
Rule Zion 1999 only - Apr 2 2:00 1:00 D
Rule Zion 1999 only - Sep 3 2:00 0 S
# Due to imminent elections in 1999, there are no dates for the year 2000
# and beyond. There was a move to legislate the DST rules in Israel, but
# due to the government's fall, it most likely won't be brought to the Knesset
# for first reading before the elections and will probably be altered by the
# newly elected government.
# From Paul Eggert (1999-01-30):
# Here are guesses for rules after 1999.
# They are probably wrong, but they are more likely than no DST at all.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 2000 max - Apr Fri>=1 2:00 1:00 D
Rule Zion 2000 max - Sep Fri>=1 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jerusalem 2:20:56 - LMT 1880
2:20:40 - JMT 1918 # Jerusalem Mean Time?
2:00 Zion I%sT
###############################################################################
# Japan
# `9:00' and `JST' are from Guy Harris.
# From Paul Eggert <eggert@twinsun.com> (1995-03-06):
# Today's _Asahi Evening News_ (page 4) reports that Japan had
# daylight saving between 1948 and 1951, but ``the system was discontinued
# because the public believed it would lead to longer working hours.''
# Shanks writes that daylight saving in Japan during those years was as follows:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
#Rule Japan 1948 only - May Sun>=1 2:00 1:00 D
#Rule Japan 1948 1951 - Sep Sat>=8 2:00 0 S
#Rule Japan 1949 only - Apr Sun>=1 2:00 1:00 D
#Rule Japan 1950 1951 - May Sun>=1 2:00 1:00 D
# but the only locations using it were US military bases.
# We go with Shanks and omit daylight saving in those years for Asia/Tokyo.
# From Hideyuki Suzuki (1998-11-09):
# 'Tokyo' usually stands for the former location of Tokyo Astronomical
# Observatory: E 139 44' 40".90 (9h 18m 58s.727), N 35 39' 16".0.
# This data is from 'Rika Nenpyou (Chronological Scientific Tables) 1996'
# edited by National Astronomical Observatory of Japan....
# JST (Japan Standard Time) has been used since 1888-01-01 00:00 (JST).
# The law is enacted on 1886-07-07.
# From Hideyuki Suzuki (1998-11-16):
# The ordinance No. 51 (1886) established "standard time" in Japan,
# which stands for the time on E 135 degree.
# In the ordinance No. 167 (1895), "standard time" was renamed to "central
# standard time". And the same ordinance also established "western standard
# time", which stands for the time on E 120 degree.... But "western standard
# time" was abolished in the ordinance No. 529 (1937). In the ordinance No.
# 167, there is no mention regarding for what place western standard time is
# standard....
#
# I wrote "ordinance" above, but I don't know how to translate.
# In Japanese it's "chokurei", which means ordinance from emperor.
# Shanks claims JST in use since 1896, and that a few places (e.g. Ishigaki)
# use +0800; go with Suzuki. Guess that all ordinances took effect on Jan 1.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tokyo 9:18:59 - LMT 1887 Dec 31 15:00u
9:00 - JST 1896
9:00 - CJT 1938
9:00 - JST
# Since 1938, all Japanese possessions have been like Asia/Tokyo.
# Jordan
#
# From
# <a href="http://star.arabia.com/990701/JO9.html">
# Jordan Week (1999-07-01)
# </a> via Steffen Thorsen (1999-09-09):
# Clocks in Jordan were forwarded one hour on Wednesday at midnight,
# in accordance with the government's decision to implement summer time
# all year round.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Jordan 1973 only - Jun 6 0:00 1:00 S
Rule Jordan 1973 1975 - Oct 1 0:00 0 -
Rule Jordan 1974 1977 - May 1 0:00 1:00 S
Rule Jordan 1976 only - Nov 1 0:00 0 -
Rule Jordan 1977 only - Oct 1 0:00 0 -
Rule Jordan 1978 only - Apr 30 0:00 1:00 S
Rule Jordan 1978 only - Sep 30 0:00 0 -
Rule Jordan 1985 only - Apr 1 0:00 1:00 S
Rule Jordan 1985 only - Oct 1 0:00 0 -
Rule Jordan 1986 1988 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1986 1990 - Oct Fri>=1 0:00 0 -
Rule Jordan 1989 only - May 8 0:00 1:00 S
Rule Jordan 1990 only - Apr 27 0:00 1:00 S
Rule Jordan 1991 only - Apr 17 0:00 1:00 S
Rule Jordan 1991 only - Sep 27 0:00 0 -
Rule Jordan 1992 only - Apr 10 0:00 1:00 S
Rule Jordan 1992 1993 - Oct Fri>=1 0:00 0 -
Rule Jordan 1993 1998 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1994 only - Sep Fri>=15 0:00 0 -
Rule Jordan 1995 1998 - Sep Fri>=15 0:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Amman 2:23:44 - LMT 1931
2:00 Jordan EE%sT 1999 Jul
2:00 1:00 EEST
# Kazakhstan
# From Paul Eggert (1996-11-22):
# Andrew Evtichov <evti@chevron.com> (1996-04-13) writes that Kazakhstan
# stayed in sync with Moscow after 1990, and that Aqtobe (formerly Aktyubinsk)
# and Aqtau (formerly Shevchenko) are the largest cities in their zones.
# Guess that Aqtau and Aqtobe diverged in 1995, since that's the first time
# IATA SSIM mentions a third time zone in Kazakhstan.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Almaty 5:07:48 - LMT 1924 May 2 # or Alma-Ata
5:00 - ALMT 1957 Mar # Alma-Ata Time
6:00 RussiaAsia ALM%sT 1991 Mar 31 2:00s
5:00 1:00 ALMST 1991 Sep 29 2:00s
5:00 - ALMT 1992 Jan 19 2:00s
6:00 E-EurAsia ALM%sT
Zone Asia/Aqtobe 3:48:40 - LMT 1924 May 2
4:00 - AKT 1957 Mar # Aktyubinsk Time
5:00 RussiaAsia AK%sT 1991 Mar 31 2:00s
4:00 1:00 AKTST 1991 Sep 29 2:00s
4:00 - AQTT 1992 Jan 19 2:00s # Aqtobe Time
5:00 E-EurAsia AQT%sT
Zone Asia/Aqtau 3:21:04 - LMT 1924 May 2 # or Aktau
4:00 - SHET 1957 Mar # Fort Shevchenko Time
5:00 RussiaAsia SHE%sT 1991 Mar 31 2:00s
4:00 1:00 AQTST 1991 Sep 29 2:00s
4:00 - AQTT 1992 Jan 19 2:00s # Aqtau Time
5:00 E-EurAsia AQT%sT 1995 Sep lastSun
4:00 E-EurAsia AQT%sT
# Kirgizstan
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Kirgiz 1992 1996 - Apr Sun>=7 0:00 1:00 S
Rule Kirgiz 1991 1996 - Sep lastSun 0:00 0 -
Rule Kirgiz 1997 max - Mar lastSun 2:30 1:00 S
Rule Kirgiz 1997 max - Oct lastSun 2:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bishkek 4:58:24 - LMT 1924 May 2
5:00 - FRUT 1930 Jun 21 # Frunze Time
6:00 RussiaAsia FRU%sT 1991 Mar 31 2:00s
5:00 1:00 FRUST 1991 Aug 31 # independence
5:00 Kirgiz KG%sT # Kirgizstan Time
###############################################################################
# Korea (North and South)
# From Guy Harris:
# According to someone at the Korean Times in San Francisco,
# Daylight Savings Time was not observed until 1987. He did not know
# at what time of day DST starts or ends.
# From Shanks (1995):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule ROK 1960 only - May 15 0:00 1:00 D
Rule ROK 1960 only - Sep 13 0:00 0 S
Rule ROK 1987 1988 - May Sun<=14 0:00 1:00 D
Rule ROK 1987 1988 - Oct Sun<=14 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Seoul 8:27:52 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 ROK K%sT 1961 Aug 10
8:30 - KST 1968 Oct
9:00 ROK K%sT
Zone Asia/Pyongyang 8:23:00 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 - KST 1961 Aug 10
9:00 - KST
###############################################################################
# Kuwait
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuwait 3:11:56 - LMT 1950
3:00 - AST
# Laos
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Vientiane 6:50:24 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Lebanon
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Lebanon 1920 only - Mar 28 0:00 1:00 S
Rule Lebanon 1920 only - Oct 25 0:00 0 -
Rule Lebanon 1921 only - Apr 3 0:00 1:00 S
Rule Lebanon 1921 only - Oct 3 0:00 0 -
Rule Lebanon 1922 only - Mar 26 0:00 1:00 S
Rule Lebanon 1922 only - Oct 8 0:00 0 -
Rule Lebanon 1923 only - Apr 22 0:00 1:00 S
Rule Lebanon 1923 only - Sep 16 0:00 0 -
Rule Lebanon 1957 1961 - May 1 0:00 1:00 S
Rule Lebanon 1957 1961 - Oct 1 0:00 0 -
Rule Lebanon 1972 only - Jun 22 0:00 1:00 S
Rule Lebanon 1972 1977 - Oct 1 0:00 0 -
Rule Lebanon 1973 1977 - May 1 0:00 1:00 S
Rule Lebanon 1978 only - Apr 30 0:00 1:00 S
Rule Lebanon 1978 only - Sep 30 0:00 0 -
Rule Lebanon 1984 1987 - May 1 0:00 1:00 S
Rule Lebanon 1984 1991 - Oct 16 0:00 0 -
Rule Lebanon 1988 only - Jun 1 0:00 1:00 S
Rule Lebanon 1989 only - May 10 0:00 1:00 S
Rule Lebanon 1990 1992 - May 1 0:00 1:00 S
Rule Lebanon 1992 only - Oct 4 0:00 0 -
Rule Lebanon 1993 max - Mar lastSun 0:00 1:00 S
Rule Lebanon 1993 1998 - Sep lastSun 0:00 0 -
Rule Lebanon 1999 max - Oct lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Beirut 2:22:00 - LMT 1880
2:00 Lebanon EE%sT
# Malaysia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NBorneo 1935 1941 - Sep 14 0:00 0:20 TS
Rule NBorneo 1935 1941 - Dec 14 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuala_Lumpur 6:46:48 - LMT 1880
6:55:24 - SMT 1905 Jun # Singapore Mean Time
7:00 - MALT 1933 # Malaya Time
7:20 - MALT 1942 Feb 15
9:00 - JST 1945 Sep 2
7:20 - MALT 1950
7:30 - MALT 1982 May
8:00 - MYT # Malaysia Time
Zone Asia/Kuching 7:21:20 - LMT 1926 Mar
7:30 - BORT 1933 # Borneo Time
8:00 NBorneo BOR%sT 1942
9:00 - JST 1945 Sep 2
8:00 - BORT 1982 May
8:00 - MYT
# Maldives
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Maldives 4:54:00 - LMT 1880 # Male
4:54:00 - MMT 1960 # Male Mean Time
5:00 - MVT # Maldives Time
# Mongolia
# Shanks says that Mongolia has three time zones, but usno1995 and the CIA map
# Standard Time Zones of the World (1997-01)
# both say that it has just one.
# Let's comment out the western and eastern Mongolian time zones
# till we know what their principal towns are.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mongol 1981 1984 - Apr 1 0:00 1:00 S
Rule Mongol 1981 1984 - Oct 1 0:00 0 -
Rule Mongol 1985 1990 - Mar lastSun 2:00 1:00 S
Rule Mongol 1985 1990 - Sep lastSun 3:00 0 -
Rule Mongol 1991 max - Mar lastSun 0:00 1:00 S
Rule Mongol 1991 1995 - Sep lastSun 0:00 0 -
Rule Mongol 1996 only - Oct Fri>=22 0:00 0 -
Rule Mongol 1997 max - Sep lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#Zone Asia/Dariv 6:14:32 - LMT 1905 Aug
# 6:00 - DART 1978 # Dariv Time
# 7:00 Mongol DAR%sT
Zone Asia/Ulan_Bator 7:07:32 - LMT 1905 Aug
7:00 - ULAT 1978 # Ulan Bator Time
8:00 Mongol ULA%sT
#Zone Asia/Baruun-Urt 7:33:00 - LMT 1905 Aug
# 8:00 - BART 1978 # Baruun-Urt Time
# 9:00 Mongol BAR%sT
# Nepal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Katmandu 5:41:16 - LMT 1920
5:30 - IST 1986
5:45 - NPT # Nepal Time
# Oman
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Muscat 3:54:20 - LMT 1920
4:00 - GST
# Pakistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Karachi 4:28:12 - LMT 1907
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST 1951 Sep 30
5:00 - KART 1971 Mar 26 # Karachi Time
5:00 - PKT # Pakistan Time
# Palestine
# From Amos Shapir <amos@nsof.co.il> (1998-02-15):
#
# From 1917 until 1948-05-15, all of Palestine, including the parts now
# known as the Gaza Strip and the West Bank, was under British rule.
# Therefore the rules given for Israel for that period, apply there too...
#
# The Gaza Strip was under Egyptian rule between 1948-05-15 until 1967-06-05
# (except a short occupation by Israel from 1956-11 till 1957-03, but no
# time zone was affected then). It was never formally annexed to Egypt,
# though.
#
# The rest of Palestine was under Jordanian rule at that time, formally
# annexed in 1950 as the West Bank (and the word "Trans" was dropped from
# the country's previous name of "the Hashemite Kingdom of the
# Trans-Jordan"). So the rules for Jordan for that time apply. Major
# towns in that area are Nablus (Shchem), El-Halil (Hebron), Ramallah, and
# East Jerusalem.
#
# Both areas were occupied by Israel in June 1967, but not annexed (except
# for East Jerusalem). They were on Israel time since then; there might
# have been a Military Governor's order about time zones, but I'm not aware
# of any (such orders may have been issued semi-annually whenever summer
# time was in effect, but maybe the legal aspect of time was just neglected).
#
# The Palestinian Authority was established in 1993, and got hold of most
# towns in the West Bank and Gaza by 1995. I know that in order to
# demonstrate...independence, they have been switching to
# summer time and back on a different schedule than Israel's, but I don't
# know when this was started, or what algorithm is used (most likely the
# Jordanian one).
#
# To summarize, the table should probably look something like that:
#
# Area \ when | 1918-1947 | 1948-1967 | 1967-1995 | 1996-
# ------------+-----------+-----------+-----------+-----------
# Israel | Zion | Zion | Zion | Zion
# West bank | Zion | Jordan | Zion | Jordan
# Gaza | Zion | Egypt | Zion | Jordan
#
# I guess more info may be available from the PA's web page (if/when they
# have one).
# From Paul Eggert (1998-02-25):
# Shanks writes that Gaza did not observe DST until 1957, but we'll go
# with Shapir and assume that it observed DST from 1940 through 1947,
# and that it used Jordanian rules starting in 1996.
# We don't yet need a separate entry for the West Bank, since
# the only differences between it and Gaza that we know about
# occurred before our cutoff date of 1970.
# However, as we get more information, we may need to add entries
# for parts of the West Bank as they transitioned from Israel's rules
# to Palestine's rules. If you have more info about this, please
# send it to tz@elsie.nci.nih.gov for incorporation into future editions.
# From IINS News Service - Israel - 1998-03-23 10:38:07 Israel time,
# forwarded by Ephraim Silverberg:
#
# Despite the fact that Israel changed over to daylight savings time
# last week, the PLO Authority (PA) has decided not to turn its clocks
# one-hour forward at this time. As a sign of independence from Israeli rule,
# the PA has decided to implement DST in April.
# From Paul Eggert (1999-09-20):
# Daoud Kuttab writes in
# <a href="http://www.jpost.com/com/Archive/22.Apr.1999/Opinion/Article-2.html">
# Holiday havoc
# </a> (Jerusalem Post, 1999-04-22) that
# the Palestinian National Authority changed to DST on 1999-04-15.
# I vaguely recall that they switch back in October (sorry, forgot the source).
# For now, let's assume that the spring switch was at 24:00,
# and that they switch at 0:00 on the 3rd Fridays of April and October.
# The rules for Egypt are stolen from the `africa' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EgyptAsia 1957 only - May 10 0:00 1:00 S
Rule EgyptAsia 1957 1958 - Oct 1 0:00 0 -
Rule EgyptAsia 1958 only - May 1 0:00 1:00 S
Rule EgyptAsia 1959 1967 - May 1 1:00 1:00 S
Rule EgyptAsia 1959 1965 - Sep 30 3:00 0 -
Rule EgyptAsia 1966 only - Oct 1 3:00 0 -
Rule Palestine 1999 max - Apr Fri>=15 0:00 1:00 S
Rule Palestine 1999 max - Oct Fri>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Gaza 2:17:52 - LMT 1900 Oct
2:00 Zion EET 1948 May 15
2:00 EgyptAsia EE%sT 1967 Jun 5
2:00 Zion I%sT 1996
2:00 Jordan EE%sT 1999
2:00 Palestine EE%sT
# Paracel Is
# no information
# Philippines
# Howse writes (p 153) that until 1844 the Philippines kept American date.
# The rest of this data is from Shanks.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Phil 1936 only - Nov 1 0:00 1:00 S
Rule Phil 1937 only - Feb 1 0:00 0 -
Rule Phil 1954 only - Apr 12 0:00 1:00 S
Rule Phil 1954 only - Jul 1 0:00 0 -
Rule Phil 1978 only - Mar 22 0:00 1:00 S
Rule Phil 1978 only - Sep 21 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Manila -15:56:00 - LMT 1844
8:04:00 - LMT 1899 May 11
8:00 Phil PH%sT 1942 May
9:00 - JST 1944 Nov
8:00 Phil PH%sT
# Qatar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Qatar 3:26:08 - LMT 1920 # Al Dawhah
4:00 - GST 1972 Jun
3:00 - AST
# Saudi Arabia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Riyadh 3:06:52 - LMT 1950
3:00 - AST
# Singapore
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Singapore 6:55:24 - LMT 1880
6:55:24 - SMT 1905 Jun # Singapore Mean Time
7:00 - MALT 1933 # Malaya Time
7:20 - MALT 1942 Feb 15
9:00 - JST 1945 Sep 2
7:20 - MALT 1950
7:30 - MALT 1965 Aug 9 # independence
7:30 - SGT 1982 May # Singapore Time
8:00 - SGT
# Spratly Is
# no information
# Sri Lanka
# From Paul Eggert (1996-09-03):
# "Sri Lanka advances clock by an hour to avoid blackout"
# (www.virtual-pc.com/lankaweb/news/items/240596-2.html, 1996-05-24,
# no longer available as of 1999-08-17)
# reported ``the country's standard time will be put forward by one hour at
# midnight Friday (1830 GMT) `in the light of the present power crisis'.''
# Transitions before 1996 are from Shanks (1995).
#
# From Dharmasiri Senanayake, Sri Lanka Media Minister (1996-10-24), as quoted
# by Shamindra in
# <a href="news:54rka5$m5h@mtinsc01-mgt.ops.worldnet.att.net">
# Daily News - Hot News Section (1996-10-26)
# </a>:
# With effect from 12.30 a.m. on 26th October 1996
# Sri Lanka will be six (06) hours ahead of GMT.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Colombo 5:19:24 - LMT 1880
5:19:32 - MMT 1906 # Moratuwa Mean Time
5:30 - IST 1942 Jan 5
5:30 0:30 IHST 1942 Sep
5:30 1:00 IST 1945 Oct 16 2:00
5:30 - IST 1996 May 25 0:00
6:30 - LKT 1996 Oct 26 0:30
6:00 - LKT
# Syria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Syria 1920 1923 - Apr Sun>=15 2:00 1:00 S
Rule Syria 1920 1923 - Oct Sun>=1 2:00 0 -
Rule Syria 1962 only - Apr 29 2:00 1:00 S
Rule Syria 1962 only - Oct 1 2:00 0 -
Rule Syria 1963 1965 - May 1 2:00 1:00 S
Rule Syria 1963 only - Sep 30 2:00 0 -
Rule Syria 1964 only - Oct 1 2:00 0 -
Rule Syria 1965 only - Sep 30 2:00 0 -
Rule Syria 1966 only - Apr 24 2:00 1:00 S
Rule Syria 1966 1976 - Oct 1 2:00 0 -
Rule Syria 1967 1978 - May 1 2:00 1:00 S
Rule Syria 1977 1978 - Sep 1 2:00 0 -
Rule Syria 1983 1984 - Apr 9 2:00 1:00 S
Rule Syria 1983 1984 - Oct 1 2:00 0 -
Rule Syria 1986 only - Feb 16 2:00 1:00 S
Rule Syria 1986 only - Oct 9 2:00 0 -
Rule Syria 1987 only - Mar 1 2:00 1:00 S
Rule Syria 1987 1988 - Oct 31 2:00 0 -
Rule Syria 1988 only - Mar 15 2:00 1:00 S
Rule Syria 1989 only - Mar 31 2:00 1:00 S
Rule Syria 1989 only - Oct 1 2:00 0 -
Rule Syria 1990 only - Apr 1 2:00 1:00 S
Rule Syria 1990 only - Sep 30 2:00 0 -
Rule Syria 1991 only - Apr 1 0:00 1:00 S
Rule Syria 1991 1992 - Oct 1 0:00 0 -
Rule Syria 1992 only - Apr 8 0:00 1:00 S
Rule Syria 1993 only - Mar 26 0:00 1:00 S
Rule Syria 1993 only - Sep 25 0:00 0 -
# IATA SSIM (1996-09) says 1997-03-31; (1998-02) says 1998-04-02;
# (1998-09) says 1999-03-29 and 1999-09-29; (1999-02) says 1999-04-02,
# 2000-04-02, and 2001-04-02; ignore all these claims for now.
Rule Syria 1994 max - Apr 1 0:00 1:00 S
Rule Syria 1994 max - Oct 1 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Damascus 2:25:12 - LMT 1920
2:00 Syria EE%sT
# Tajikistan
# From Shanks (1995), who writes ``date of change uncertain'' for 1991.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dushanbe 4:35:12 - LMT 1924 May 2
5:00 - DUST 1930 Jun 21 # Dushanbe Time
6:00 RussiaAsia DUS%sT 1991 Mar 31 2:00s
5:00 1:00 DUSST 1991 Sep 9 # independence
5:00 RussiaAsia TJ%sT 1992
5:00 - TJT # Tajikistan Time
# Thailand
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bangkok 6:42:04 - LMT 1880
6:42:04 - BMT 1920 Apr # Bangkok Mean Time
7:00 - ICT
# Turkmenistan
# From Shanks (1995):
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Ashkhabad 3:53:32 - LMT 1924 May 2 # or Ashgabat
4:00 - ASHT 1930 Jun 21 # Ashkhabad Time
5:00 - ASHT 1981 Apr 1
5:00 1:00 ASHST 1981 Oct 1
6:00 - ASHT 1982 Apr 1
5:00 RussiaAsia ASH%sT 1991
5:00 - ASHT 1991 Oct 27 # independence
5:00 RussiaAsia TM%sT 1993 # Turkmenistan Time
5:00 - TMT
# United Arab Emirates
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dubai 3:41:12 - LMT 1920
4:00 - GST
# Uzbekistan
# From Shanks (1995):
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Samarkand 4:27:12 - LMT 1924 May 2
4:00 - SAMT 1930 Jun 21 # Samarkand Time
5:00 - SAMT 1981 Apr 1
5:00 1:00 SAMST 1981 Oct 1
6:00 RussiaAsia TAS%sT 1991 Mar 31 2:00 # Tashkent Time
5:00 - TAST 1991 Sep 1 # independence
5:00 - UZT 1992
5:00 RussiaAsia UZ%sT 1993
5:00 - UZT
Zone Asia/Tashkent 4:37:12 - LMT 1924 May 2
5:00 - TAST 1930 Jun 21 # Tashkent Time
6:00 RussiaAsia TAS%sT 1991 Mar 31 2:00s
5:00 - TAST 1991 Sep 1 # independence
5:00 - UZT 1992
5:00 RussiaAsia UZ%sT 1993
5:00 - UZT
# Vietnam
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# Saigon's official name is Thanh-Pho Ho Chi Minh, but it's too long.
# We'll stick with the traditional name for now.
# From Shanks (1991):
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Saigon 7:06:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Yemen
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Aden 3:00:48 - LMT 1950
3:00 - AST
| # @(#)asia 7.49
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-03-22):
#
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (5th edition),
# San Diego: ACS Publications, Inc. (1999).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 2:00 EET EEST Eastern European Time
# 2:00 IST IDT Israel
# 3:00 AST ADT Arabia*
# 4:00 GST Gulf*
# 5:30 IST India
# 7:00 ICT Indochina*
# 8:00 CST China
# 9:00 CJT Central Japanese Time (1896/1937)*
# 9:00 JST Japan
# 9:00 KST Korea
# 9:30 CST (Australian) Central Standard Time
#
# See the `europe' file for Russia and Turkey in Asia.
# From Guy Harris:
# Incorporates data for Singapore from Robert Elz' asia 1.1, as well as
# additional information from Tom Yap, Sun Microsystems Intercontinental
# Technical Support (including a page from the Official Airline Guide -
# Worldwide Edition). The names for time zones are guesses.
###############################################################################
# These rules are stolen from the `europe' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EUAsia 1981 max - Mar lastSun 1:00u 1:00 S
Rule EUAsia 1996 max - Oct lastSun 1:00u 0 -
Rule E-EurAsia 1981 max - Mar lastSun 0:00 1:00 S
Rule E-EurAsia 1979 1995 - Sep lastSun 0:00 0 -
Rule E-EurAsia 1996 max - Oct lastSun 0:00 0 -
Rule RussiaAsia 1981 1984 - Apr 1 0:00 1:00 S
Rule RussiaAsia 1981 1983 - Oct 1 0:00 0 -
Rule RussiaAsia 1984 1991 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1985 1991 - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1992 only - Mar lastSat 23:00 1:00 S
Rule RussiaAsia 1992 only - Sep lastSat 23:00 0 -
Rule RussiaAsia 1993 max - Mar lastSun 2:00s 1:00 S
Rule RussiaAsia 1993 1995 - Sep lastSun 2:00s 0 -
Rule RussiaAsia 1996 max - Oct lastSun 2:00s 0 -
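# A quick reading aid for the Rule lines in this file (only a summary; the
# zic man page is the definitive reference): ON may be an exact day ("1"),
# "lastSun" (the last Sunday of the month), or a form like "Fri>=15" (the
# first Friday on or after the 15th); AT is local wall-clock time unless
# suffixed with "s" (local standard time) or "u" (UT); SAVE is added to the
# zone's standard offset; and LETTER/S replaces the %s in a Zone's FORMAT,
# with "-" meaning the empty string.  For example,
# "Rule RussiaAsia 1993 max - Mar lastSun 2:00s 1:00 S" means: each year
# from 1993 on, at 02:00 local standard time on the last Sunday in March,
# add one hour and substitute "S", so a zone with FORMAT ALM%sT displays
# "ALMST"; the matching autumn rule's "-" gives plain "ALMT".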
# Afghanistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kabul 4:36:48 - LMT 1890
4:00 - AFT 1945
4:30 - AFT
# Armenia
# From Paul Eggert (1999-10-29):
# Shanks has Yerevan switching to 3:00 (with Russian DST) in spring 1991,
# then to 4:00 with no DST in fall 1995, then readopting Russian DST in 1997.
# Go with Shanks, even when he disagrees with others. Edgar Der-Danieliantz
# <edd@AIC.NET> reported (1996-05-04) that Yerevan probably wouldn't use DST
# in 1996, though it did use DST in 1995. IATA SSIM (1991/1998) reports that
# Armenia switched from 3:00 to 4:00 in 1998 and observed DST after 1991,
# but started switching at 3:00s in 1998. IATA SSIM (1999-02) reports
# that they switch one day later in 2001 (i.e. on Mondays).
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Yerevan 2:58:00 - LMT 1924 May 2
3:00 - YERT 1957 Mar # Yerevan Time
4:00 RussiaAsia YER%sT 1991 Mar 31 2:00s
3:00 1:00 YERST 1991 Sep 23 # independence
3:00 RussiaAsia AM%sT 1995 Sep 24 2:00s
4:00 - AMT 1997
4:00 RussiaAsia AM%sT
# Azerbaijan
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Azer 1997 max - Mar lastSun 1:00 1:00 S
Rule Azer 1997 max - Oct lastSun 1:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baku 3:19:24 - LMT 1924 May 2
3:00 - BAKT 1957 Mar # Baku Time
4:00 RussiaAsia BAK%sT 1991 Mar 31 2:00s
3:00 1:00 BAKST 1991 Aug 30 # independence
3:00 RussiaAsia AZ%sT 1992 Sep lastSun 2:00s
4:00 - AZT 1996 # Azerbaijan time
4:00 EUAsia AZ%sT 1997
4:00 Azer AZ%sT
# Bahrain
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bahrain 3:22:20 - LMT 1920 # Al-Manamah
4:00 - GST 1972 Jun
3:00 - AST
# Bangladesh
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dacca 6:01:40 - LMT 1890
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
6:30 - BURT 1951 Sep 30
6:00 - DACT 1971 Mar 26 # Dacca Time
6:00 - BDT # Bangladesh Time
# Bhutan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Thimbu 5:58:36 - LMT 1947 Aug 15
5:30 - IST 1987 Oct
6:00 - BTT # Bhutan Time
# British Indian Ocean Territory
# From Whitman:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Chagos 5:00 - IOT # BIOT Time
# Brunei
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Brunei 7:39:40 - LMT 1926 Mar # Bandar Seri Begawan
7:30 - BNT 1933
8:00 - BNT
# Burma / Myanmar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Rangoon 6:24:40 - LMT 1880 # or Yangon
6:24:36 - RMT 1920 # Rangoon Mean Time?
6:30 - BURT 1942 May # Burma Time
9:00 - JST 1945 May 3
6:30 - MMT # Myanmar Time
# Cambodia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Phnom_Penh 6:59:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# China
# From Guy Harris:
# People's Republic of China. Yes, they really have only one time zone.
# From Bob Devine (1988-01-28):
# No they don't. See TIME mag, 1986-02-17 p.52. Even though
# China is across 4 physical time zones, before Feb 1, 1986 only the
# Peking (Bejing) time zone was recognized. Since that date, China
# has two of 'em -- Peking's and Urumqi (named after the capital of
# the Xinjiang Uighur Autonomous Region). I don't know about DST for it.
#
# . . .I just deleted the DST table and this editor makes it too
# painful to suck in another copy.. So, here is what I have for
# DST start/end dates for Peking's time zone (info from AP):
#
# 1986 May 4 - Sept 14
# 1987 mid-April - ??
# From U. S. Naval Observatory (1989-01-19):
# CHINA 8 H AHEAD OF UTC ALL OF CHINA, INCL TAIWAN
# CHINA 9 H AHEAD OF UTC APR 17 - SEP 10
# From Paul Eggert <eggert@twinsun.com> (1995-12-19):
# Shanks writes that China has had a single time zone since 1980 May 1,
# observing summer DST from 1986 through 1991; this contradicts Devine's
# note about Time magazine, though apparently _something_ happened in 1986.
# Go with Shanks for now. I made up names for the other pre-1980 time zones.
# From Shanks:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Shang 1940 only - Jun 3 0:00 1:00 D
Rule Shang 1940 1941 - Oct 1 0:00 0 S
Rule Shang 1941 only - Mar 16 0:00 1:00 D
Rule PRC 1949 only - Jan 1 0:00 0 S
Rule PRC 1986 only - May 4 0:00 1:00 D
Rule PRC 1986 1991 - Sep Sun>=11 0:00 0 S
Rule PRC 1987 1991 - Apr Sun>=10 0:00 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Harbin 8:26:44 - LMT 1928
8:30 - HART 1932 Mar # Harbin Time
8:00 - CST 1940
9:00 - HART 1966 May
8:30 - HART 1980 May
8:00 PRC C%sT
Zone Asia/Shanghai 8:05:52 - LMT 1928
8:00 Shang C%sT 1949
8:00 PRC C%sT
Zone Asia/Chungking 7:06:20 - LMT 1928
7:00 - CHUT 1980 May # Chungking Time
8:00 PRC C%sT
Zone Asia/Urumqi 5:50:20 - LMT 1928
6:00 - URUT 1980 May # Urumqi Time
8:00 PRC C%sT
Zone Asia/Kashgar 5:03:56 - LMT 1928
5:30 - KAST 1940 # Kashgar Time
5:00 - KAST 1980 May
8:00 PRC C%sT
# Hong Kong
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule HK 1946 only - Apr 20 3:30 1:00 S
Rule HK 1946 only - Dec 1 3:30 0 -
Rule HK 1947 only - Apr 13 3:30 1:00 S
Rule HK 1947 only - Dec 30 3:30 0 -
Rule HK 1948 only - May 2 3:30 1:00 S
Rule HK 1948 1952 - Oct lastSun 3:30 0 -
Rule HK 1949 1953 - Apr Sun>=1 3:30 1:00 S
Rule HK 1953 only - Nov 1 3:30 0 -
Rule HK 1954 1964 - Mar Sun>=18 3:30 1:00 S
Rule HK 1954 only - Oct 31 3:30 0 -
Rule HK 1955 1964 - Nov Sun>=1 3:30 0 -
Rule HK 1965 1977 - Apr Sun>=16 3:30 1:00 S
Rule HK 1965 1977 - Oct Sun>=16 3:30 0 -
Rule HK 1979 1980 - May Sun>=8 3:30 1:00 S
Rule HK 1979 1980 - Oct Sun>=16 3:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Hong_Kong 7:36:36 - LMT 1904 Oct 30
8:00 HK HK%sT 1997 Jul 1 # return to China
8:00 PRC C%sT
###############################################################################
# Taiwan
# Shanks writes that Taiwan observed DST during 1945, when it
# was still controlled by Japan. This is hard to believe, but we don't
# have any other information.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Taiwan 1945 1951 - May 1 0:00 1:00 D
Rule Taiwan 1945 1951 - Oct 1 0:00 0 S
Rule Taiwan 1952 only - Mar 1 0:00 1:00 D
Rule Taiwan 1952 1954 - Nov 1 0:00 0 S
Rule Taiwan 1953 1959 - Apr 1 0:00 1:00 D
Rule Taiwan 1955 1961 - Oct 1 0:00 0 S
Rule Taiwan 1960 1961 - Jun 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Apr 1 0:00 1:00 D
Rule Taiwan 1974 1975 - Oct 1 0:00 0 S
Rule Taiwan 1980 only - Jun 30 0:00 1:00 D
Rule Taiwan 1980 only - Sep 30 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Taipei 8:06:00 - LMT 1896
8:00 Taiwan C%sT
# Macao
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Macao 1961 1962 - Mar Sun>=16 3:30 1:00 S
Rule Macao 1961 1964 - Nov Sun>=1 3:30 0 -
Rule Macao 1963 only - Mar Sun>=16 0:00 1:00 S
Rule Macao 1964 only - Mar Sun>=16 3:30 1:00 S
Rule Macao 1965 only - Mar Sun>=16 0:00 1:00 S
Rule Macao 1965 only - Oct 31 0:00 0 -
Rule Macao 1966 1971 - Apr Sun>=16 3:30 1:00 S
Rule Macao 1966 1971 - Oct Sun>=16 3:30 0 -
Rule Macao 1972 1974 - Apr Sun>=15 0:00 1:00 S
Rule Macao 1972 1973 - Oct Sun>=15 0:00 0 -
Rule Macao 1974 1977 - Oct Sun>=15 3:30 0 -
Rule Macao 1975 1977 - Apr Sun>=15 3:30 1:00 S
Rule Macao 1978 1980 - Apr Sun>=15 0:00 1:00 S
Rule Macao 1978 1980 - Oct Sun>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Macao 7:34:20 - LMT 1912
8:00 Macao MO%sT 1999 Dec 20 # return to China
8:00 PRC C%sT
###############################################################################
# Cyprus
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cyprus 1975 only - Apr 13 0:00 1:00 S
Rule Cyprus 1975 only - Oct 12 0:00 0 -
Rule Cyprus 1976 only - May 15 0:00 1:00 S
Rule Cyprus 1976 only - Oct 11 0:00 0 -
Rule Cyprus 1977 1980 - Apr Sun>=1 0:00 1:00 S
Rule Cyprus 1977 only - Sep 25 0:00 0 -
Rule Cyprus 1978 only - Oct 2 0:00 0 -
Rule Cyprus 1979 1997 - Sep lastSun 0:00 0 -
Rule Cyprus 1981 1998 - Mar lastSun 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Nicosia 2:13:28 - LMT 1921 Nov 14
2:00 Cyprus EE%sT 1998 Sep
2:00 EUAsia EE%sT
# IATA SSIM (1998-09) has Cyprus using EU rules for the first time.
# Georgia
# From Paul Eggert <eggert@twinsun.com> (1994-11-19):
# Today's _Economist_ (p 60) reports that Georgia moved its clocks forward
# an hour recently, due to a law proposed by Zurab Murvanidze,
# an MP who went on a hunger strike for 11 days to force discussion about it!
# We have no details, but we'll guess they didn't move the clocks back in fall.
#
# From Mathew Englander <mathew@io.org>, quoting AP (1996-10-23 13:05-04):
# Instead of putting back clocks at the end of October, Georgia
# will stay on daylight savings time this winter to save energy,
# President Eduard Shevardnadze decreed Wednesday.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tbilisi 2:59:16 - LMT 1880
2:59:16 - TBMT 1924 May 2 # Tbilisi Mean Time
3:00 - TBIT 1957 Mar # Tbilisi Time
4:00 RussiaAsia TBI%sT 1991 Mar 31 2:00s
3:00 1:00 TBIST 1991 Apr 9 # independence
3:00 RussiaAsia GE%sT 1992 # Georgia Time
3:00 E-EurAsia GE%sT 1994 Sep lastSun
4:00 E-EurAsia GE%sT 1996 Oct lastSun
4:00 1:00 GEST 1997 Mar lastSun
4:00 E-EurAsia GE%sT
# East Timor
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dili 8:22:20 - LMT 1912
8:00 - TPT 1942 Feb 21 23:00
9:00 - JST 1945 Aug
9:00 - TPT 1976 May 3
8:00 - TPT # East Timor Time
# India
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Calcutta 5:53:28 - LMT 1880
5:53:20 - HMT 1941 Oct # Howrah Mean Time?
6:30 - BURT 1942 May 15 # Burma Time
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST
# The following are like Asia/Calcutta:
# Andaman Is
# Lakshadweep (Laccadive, Minicoy and Amindivi Is)
# Nicobar Is
# Indonesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jakarta 7:07:12 - LMT 1867 Aug 10
# Shanks says the next transition was at 1924 Jan 1 0:13,
# but this must be a typo.
7:07:12 - JMT 1923 Dec 31 23:47:12 # Jakarta
7:20 - JAVT 1932 Nov # Java Time
7:30 - JAVT 1942 Mar 23
9:00 - JST 1945 Aug
7:30 - JAVT 1948 May
8:00 - JAVT 1950 May
7:30 - JAVT 1964
7:00 - JAVT
Zone Asia/Ujung_Pandang 7:57:36 - LMT 1920
7:57:36 - MMT 1932 Nov # Macassar MT
8:00 - BORT 1942 Feb 9 # Borneo Time
9:00 - JST 1945 Aug
8:00 - BORT
Zone Asia/Jayapura 9:22:48 - LMT 1932 Nov
9:00 - JAYT 1944 # Jayapura Time
9:30 - CST 1964
9:00 - JAYT
# Iran
# From Paul Eggert (1999-09-27), following up a suggestion by Rich Wales:
# Ahmea Alavi in
# <a href="http://tehran.stanford.edu/Iran_Lib/Calendar/taghveem.txt">
# TAGHVEEM (1993-07-12)
# </a>
# writes ``Daylight saving time in Iran starts from the first day
# of Farvardin and ends the first day of Mehr.'' This disagrees with the SSIM:
#
# DST start DST end
# year SSIM Alavi SSIM Alavi
# 1991 05-03!= 03-21 09-20!= 09-23
# 1992 03-22!= 03-21 09-23 09-23
# 1993 03-21 03-21 09-23 09-23
# 1994 03-21 03-21 09-22!= 09-23
# 1995 03-21 03-21 09-22!= 09-23
# 1996 03-21!= 03-20 09-21!= 09-22
# 1997 03-21 03-21 09-21!= 09-23
# 1998 03-21 03-21 09-21!= 09-23
# 1999 03-22!= 03-21 09-22!= 09-23
# 2000 03-21!= 03-20 09-21!= 09-22
# 2001 03-17!= 03-21 09-19!= 09-23
#
# Go with Alavi starting with 1992.
# I used Ed Reingold's cal-persia in GNU Emacs 19.34 to compute Persian dates.
# The Persian calendar is based on the sun, and dates after around 2050
# are approximate; stop after 2037 when 32-bit time_t's overflow.
#
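# A worked check of that cutoff: a signed 32-bit time_t overflows
# 2^31 - 1 = 2,147,483,647 seconds after 1970-01-01 00:00 UT, which is
# 2038-01-19 03:14:07 UT, so 2037 is the last complete year such systems
# can represent.
#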
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iran 1978 1980 - Mar 21 0:00 1:00 S
Rule Iran 1978 only - Oct 21 0:00 0 -
Rule Iran 1979 only - Sep 19 0:00 0 -
Rule Iran 1980 only - Sep 23 0:00 0 -
Rule Iran 1991 only - May 3 0:00s 1:00 S
Rule Iran 1991 only - Sep 20 0:00s 0 -
Rule Iran 1992 1995 - Mar 21 0:00 1:00 S
Rule Iran 1992 1995 - Sep 23 0:00 0 -
Rule Iran 1996 only - Mar 20 0:00 1:00 S
Rule Iran 1996 only - Sep 22 0:00 0 -
Rule Iran 1997 1999 - Mar 21 0:00 1:00 S
Rule Iran 1997 1999 - Sep 23 0:00 0 -
Rule Iran 2000 only - Mar 20 0:00 1:00 S
Rule Iran 2000 only - Sep 22 0:00 0 -
Rule Iran 2001 2003 - Mar 21 0:00 1:00 S
Rule Iran 2001 2003 - Sep 23 0:00 0 -
Rule Iran 2004 only - Mar 20 0:00 1:00 S
Rule Iran 2004 only - Sep 22 0:00 0 -
Rule Iran 2005 2007 - Mar 21 0:00 1:00 S
Rule Iran 2005 2007 - Sep 23 0:00 0 -
Rule Iran 2008 only - Mar 20 0:00 1:00 S
Rule Iran 2008 only - Sep 22 0:00 0 -
Rule Iran 2009 2011 - Mar 21 0:00 1:00 S
Rule Iran 2009 2011 - Sep 23 0:00 0 -
Rule Iran 2012 only - Mar 20 0:00 1:00 S
Rule Iran 2012 only - Sep 22 0:00 0 -
Rule Iran 2013 2015 - Mar 21 0:00 1:00 S
Rule Iran 2013 2015 - Sep 23 0:00 0 -
Rule Iran 2016 only - Mar 20 0:00 1:00 S
Rule Iran 2016 only - Sep 22 0:00 0 -
Rule Iran 2017 2019 - Mar 21 0:00 1:00 S
Rule Iran 2017 2019 - Sep 23 0:00 0 -
Rule Iran 2020 only - Mar 20 0:00 1:00 S
Rule Iran 2020 only - Sep 22 0:00 0 -
Rule Iran 2021 2023 - Mar 21 0:00 1:00 S
Rule Iran 2021 2023 - Sep 23 0:00 0 -
Rule Iran 2024 2025 - Mar 20 0:00 1:00 S
Rule Iran 2024 2025 - Sep 22 0:00 0 -
Rule Iran 2026 2027 - Mar 21 0:00 1:00 S
Rule Iran 2026 2027 - Sep 23 0:00 0 -
Rule Iran 2028 2029 - Mar 20 0:00 1:00 S
Rule Iran 2028 2029 - Sep 22 0:00 0 -
Rule Iran 2030 2031 - Mar 21 0:00 1:00 S
Rule Iran 2030 2031 - Sep 23 0:00 0 -
Rule Iran 2032 2033 - Mar 20 0:00 1:00 S
Rule Iran 2032 2033 - Sep 22 0:00 0 -
Rule Iran 2034 2035 - Mar 21 0:00 1:00 S
Rule Iran 2034 2035 - Sep 23 0:00 0 -
Rule Iran 2036 2037 - Mar 20 0:00 1:00 S
Rule Iran 2036 2037 - Sep 22 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tehran 3:25:44 - LMT 1916
3:25:44 - TMT 1946 # Tehran Mean Time
3:30 - IRT 1977 Nov
4:00 Iran IR%sT 1979
3:30 Iran IR%sT
# Iraq
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Iraq 1982 only - May 1 0:00 1:00 D
Rule Iraq 1982 1984 - Oct 1 0:00 0 S
Rule Iraq 1983 only - Mar 31 0:00 1:00 D
Rule Iraq 1984 1985 - Apr 1 0:00 1:00 D
Rule Iraq 1985 1990 - Sep lastSun 1:00s 0 S
Rule Iraq 1986 1990 - Mar lastSun 1:00s 1:00 D
# IATA SSIM (1991/1996) says Apr 1 12:01am UTC; guess the `:01' is a typo.
# Shanks says Iraq did not observe DST 1992/1997 or 1999 on; ignore this.
Rule Iraq 1991 max - Apr 1 3:00s 1:00 D
Rule Iraq 1991 max - Oct 1 3:00s 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Baghdad 2:57:40 - LMT 1890
2:57:36 - BMT 1918 # Baghdad Mean Time?
3:00 - AST 1982 May
3:00 Iraq A%sT
###############################################################################
# Israel
# From U. S. Naval Observatory (1989-01-19):
# ISRAEL 2 H AHEAD OF UTC
# ISRAEL 3 H AHEAD OF UTC APR 10 - SEP 3
# From Shanks:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1940 only - Jun 1 0:00 1:00 D
Rule Zion 1942 1944 - Nov 1 0:00 0 S
Rule Zion 1943 only - Apr 1 2:00 1:00 D
Rule Zion 1944 only - Apr 1 0:00 1:00 D
Rule Zion 1945 only - Apr 16 0:00 1:00 D
Rule Zion 1945 only - Nov 1 2:00 0 S
Rule Zion 1946 only - Apr 16 2:00 1:00 D
Rule Zion 1946 only - Nov 1 0:00 0 S
Rule Zion 1948 only - May 23 0:00 2:00 DD
Rule Zion 1948 only - Sep 1 0:00 1:00 D
Rule Zion 1948 1949 - Nov 1 2:00 0 S
Rule Zion 1949 only - May 1 0:00 1:00 D
Rule Zion 1950 only - Apr 16 0:00 1:00 D
Rule Zion 1950 only - Sep 15 3:00 0 S
Rule Zion 1951 only - Apr 1 0:00 1:00 D
Rule Zion 1951 only - Nov 11 3:00 0 S
Rule Zion 1952 only - Apr 20 2:00 1:00 D
Rule Zion 1952 only - Oct 19 3:00 0 S
Rule Zion 1953 only - Apr 12 2:00 1:00 D
Rule Zion 1953 only - Sep 13 3:00 0 S
Rule Zion 1954 only - Jun 13 0:00 1:00 D
Rule Zion 1954 only - Sep 12 0:00 0 S
Rule Zion 1955 only - Jun 11 2:00 1:00 D
Rule Zion 1955 only - Sep 11 0:00 0 S
Rule Zion 1956 only - Jun 3 0:00 1:00 D
Rule Zion 1956 only - Sep 30 3:00 0 S
Rule Zion 1957 only - Apr 29 2:00 1:00 D
Rule Zion 1957 only - Sep 22 0:00 0 S
Rule Zion 1974 only - Jul 7 0:00 1:00 D
Rule Zion 1974 only - Oct 13 0:00 0 S
Rule Zion 1975 only - Apr 20 0:00 1:00 D
Rule Zion 1975 only - Aug 31 0:00 0 S
Rule Zion 1985 only - Apr 14 0:00 1:00 D
Rule Zion 1985 only - Sep 15 0:00 0 S
Rule Zion 1986 only - May 18 0:00 1:00 D
Rule Zion 1986 only - Sep 7 0:00 0 S
Rule Zion 1987 only - Apr 15 0:00 1:00 D
Rule Zion 1987 only - Sep 13 0:00 0 S
Rule Zion 1988 only - Apr 9 0:00 1:00 D
Rule Zion 1988 only - Sep 3 0:00 0 S
# From Ephraim Silverberg <ephraim@cs.huji.ac.il>
# (1997-03-04, 1998-03-16 and 1998-12-28):
# According to the Office of the Secretary General of the Ministry of
# Interior, there is NO set rule for Daylight-Savings/Standard time changes.
# One thing is entrenched in law, however: that there must be at least 150
# days of daylight savings time annually. From 1993-1998, the change to
# daylight savings time was on a Friday morning from midnight IST to
# 1 a.m IDT; up until 1998, the change back to standard time was on a
# Saturday night from midnight daylight savings time to 11 p.m. standard
# time. 1996 is an exception to this rule where the change back to standard
# time took place on Sunday night instead of Saturday night to avoid
# conflicts with the Jewish New Year. Starting in 1999, the change to
# daylight savings time will still be on a Friday morning but from
# 2 a.m. IST to 3 a.m. IDT; furthermore, the change back to standard time
# will now also be on a Friday morning from 2 a.m. IDT to 1 a.m. IST.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1989 only - Apr 30 0:00 1:00 D
Rule Zion 1989 only - Sep 3 0:00 0 S
Rule Zion 1990 only - Mar 25 0:00 1:00 D
Rule Zion 1990 only - Aug 26 0:00 0 S
Rule Zion 1991 only - Mar 24 0:00 1:00 D
Rule Zion 1991 only - Sep 1 0:00 0 S
Rule Zion 1992 only - Mar 29 0:00 1:00 D
Rule Zion 1992 only - Sep 6 0:00 0 S
Rule Zion 1993 only - Apr 2 0:00 1:00 D
Rule Zion 1993 only - Sep 5 0:00 0 S
# The dates for 1994-1995 were obtained from Office of the Spokeswoman for the
# Ministry of Interior, Jerusalem, Israel. The spokeswoman can be reached by
# calling the office directly at 972-2-6701447 or 972-2-6701448.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1994 only - Apr 1 0:00 1:00 D
Rule Zion 1994 only - Aug 28 0:00 0 S
Rule Zion 1995 only - Mar 31 0:00 1:00 D
Rule Zion 1995 only - Sep 3 0:00 0 S
# The dates for 1996 were determined by the Minister of Interior of the
# time, Haim Ramon. The official announcement regarding 1996-1998
# (with the dates for 1997-1998 no longer being relevant) can be viewed at:
#
# ftp://ftp.huji.ac.il/pub/tz/announcements/1996-1998.ramon.ps.gz
#
# The dates for 1997-1998 were altered by his successor, Rabbi Eli Suissa.
#
# The official announcements for the years 1997-1999 can be viewed at:
#
# ftp://ftp.huji.ac.il/pub/tz/announcements/YYYY.ps.gz
#
# where YYYY is the relevant year.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 1996 only - Mar 15 0:00 1:00 D
Rule Zion 1996 only - Sep 16 0:00 0 S
Rule Zion 1997 only - Mar 21 0:00 1:00 D
Rule Zion 1997 only - Sep 14 0:00 0 S
Rule Zion 1998 only - Mar 20 0:00 1:00 D
Rule Zion 1998 only - Sep 6 0:00 0 S
Rule Zion 1999 only - Apr 2 2:00 1:00 D
Rule Zion 1999 only - Sep 3 2:00 0 S
# Due to imminent elections in 1999, there are no dates for the year 2000
# and beyond. There was a move to legislate the DST rules in Israel, but
# due to the government's fall, it most likely won't be brought to the Knesset
# for first reading before the elections and will probably be altered by the
# newly elected government.
# From Paul Eggert (1999-01-30):
# Here are guesses for rules after 1999.
# They are probably wrong, but they are more likely than no DST at all.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Zion 2000 max - Apr Fri>=1 2:00 1:00 D
Rule Zion 2000 max - Sep Fri>=1 2:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Jerusalem 2:20:56 - LMT 1880
2:20:40 - JMT 1918 # Jerusalem Mean Time?
2:00 Zion I%sT
###############################################################################
# Japan
# `9:00' and `JST' are from Guy Harris.
# From Paul Eggert <eggert@twinsun.com> (1995-03-06):
# Today's _Asahi Evening News_ (page 4) reports that Japan had
# daylight saving between 1948 and 1951, but ``the system was discontinued
# because the public believed it would lead to longer working hours.''
# Shanks writes that daylight saving in Japan during those years was as follows:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
#Rule Japan 1948 only - May Sun>=1 2:00 1:00 D
#Rule Japan 1948 1951 - Sep Sat>=8 2:00 0 S
#Rule Japan 1949 only - Apr Sun>=1 2:00 1:00 D
#Rule Japan 1950 1951 - May Sun>=1 2:00 1:00 D
# but the only locations using it were US military bases.
# We go with Shanks and omit daylight saving in those years for Asia/Tokyo.
# From Hideyuki Suzuki (1998-11-09):
# 'Tokyo' usually stands for the former location of Tokyo Astronomical
# Observatory: E 139 44' 40".90 (9h 18m 58s.727), N 35 39' 16".0.
# This data is from 'Rika Nenpyou (Chronological Scientific Tables) 1996'
# edited by National Astronomical Observatory of Japan....
# JST (Japan Standard Time) has been used since 1888-01-01 00:00 (JST).
# The law is enacted on 1886-07-07.
# From Hideyuki Suzuki (1998-11-16):
# The ordinance No. 51 (1886) established "standard time" in Japan,
# which stands for the time on E 135 degree.
# In the ordinance No. 167 (1895), "standard time" was renamed to "central
# standard time". And the same ordinance also established "western standard
# time", which stands for the time on E 120 degree.... But "western standard
# time" was abolished in the ordinance No. 529 (1937). In the ordinance No.
# 167, there is no mention regarding for what place western standard time is
# standard....
#
# I wrote "ordinance" above, but I don't know how to translate.
# In Japanese it's "chokurei", which means ordinance from emperor.
# Shanks claims JST in use since 1896, and that a few places (e.g. Ishigaki)
# use +0800; go with Suzuki. Guess that all ordinances took effect on Jan 1.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Tokyo 9:18:59 - LMT 1887 Dec 31 15:00u
9:00 - JST 1896
9:00 - CJT 1938
9:00 - JST
# Since 1938, all Japanese possessions have been like Asia/Tokyo.
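# (Arithmetic behind the "1887 Dec 31 15:00u" above: Suzuki says JST took
# effect at 1888-01-01 00:00 local time, and JST is UT+9, so that instant
# is 1887-12-31 15:00 UT; the "u" suffix states the transition in UT, so
# the changeover from LMT lands on exactly that moment.)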
# Jordan
#
# From
# <a href="http://star.arabia.com/990701/JO9.html">
# Jordan Week (1999-07-01)
# </a> via Steffen Thorsen (1999-09-09):
# Clocks in Jordan were forwarded one hour on Wednesday at midnight,
# in accordance with the government's decision to implement summer time
# all year round.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Jordan 1973 only - Jun 6 0:00 1:00 S
Rule Jordan 1973 1975 - Oct 1 0:00 0 -
Rule Jordan 1974 1977 - May 1 0:00 1:00 S
Rule Jordan 1976 only - Nov 1 0:00 0 -
Rule Jordan 1977 only - Oct 1 0:00 0 -
Rule Jordan 1978 only - Apr 30 0:00 1:00 S
Rule Jordan 1978 only - Sep 30 0:00 0 -
Rule Jordan 1985 only - Apr 1 0:00 1:00 S
Rule Jordan 1985 only - Oct 1 0:00 0 -
Rule Jordan 1986 1988 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1986 1990 - Oct Fri>=1 0:00 0 -
Rule Jordan 1989 only - May 8 0:00 1:00 S
Rule Jordan 1990 only - Apr 27 0:00 1:00 S
Rule Jordan 1991 only - Apr 17 0:00 1:00 S
Rule Jordan 1991 only - Sep 27 0:00 0 -
Rule Jordan 1992 only - Apr 10 0:00 1:00 S
Rule Jordan 1992 1993 - Oct Fri>=1 0:00 0 -
Rule Jordan 1993 1998 - Apr Fri>=1 0:00 1:00 S
Rule Jordan 1994 only - Sep Fri>=15 0:00 0 -
Rule Jordan 1995 1998 - Sep Fri>=15 0:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Amman 2:23:44 - LMT 1931
2:00 Jordan EE%sT 1999 Jul
2:00 1:00 EEST
# Kazakhstan
# From Paul Eggert (1996-11-22):
# Andrew Evtichov <evti@chevron.com> (1996-04-13) writes that Kazakhstan
# stayed in sync with Moscow after 1990, and that Aqtobe (formerly Aktyubinsk)
# and Aqtau (formerly Shevchenko) are the largest cities in their zones.
# Guess that Aqtau and Aqtobe diverged in 1995, since that's the first time
# IATA SSIM mentions a third time zone in Kazakhstan.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Almaty 5:07:48 - LMT 1924 May 2 # or Alma-Ata
5:00 - ALMT 1957 Mar # Alma-Ata Time
6:00 RussiaAsia ALM%sT 1991 Mar 31 2:00s
5:00 1:00 ALMST 1991 Sep 29 2:00s
5:00 - ALMT 1992 Jan 19 2:00s
6:00 E-EurAsia ALM%sT
Zone Asia/Aqtobe 3:48:40 - LMT 1924 May 2
4:00 - AKT 1957 Mar # Aktyubinsk Time
5:00 RussiaAsia AK%sT 1991 Mar 31 2:00s
4:00 1:00 AKTST 1991 Sep 29 2:00s
4:00 - AQTT 1992 Jan 19 2:00s # Aqtobe Time
5:00 E-EurAsia AQT%sT
Zone Asia/Aqtau 3:21:04 - LMT 1924 May 2 # or Aktau
4:00 - SHET 1957 Mar # Fort Shevchenko Time
5:00 RussiaAsia SHE%sT 1991 Mar 31 2:00s
4:00 1:00 AQTST 1991 Sep 29 2:00s
4:00 - AQTT 1992 Jan 19 2:00s # Aqtau Time
5:00 E-EurAsia AQT%sT 1995 Sep lastSun
4:00 E-EurAsia AQT%sT
# Kirgizstan
# Transitions through 1991 are from Shanks.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Kirgiz 1992 1996 - Apr Sun>=7 0:00 1:00 S
Rule Kirgiz 1992 1996 - Sep lastSun 0:00 0 -
Rule Kirgiz 1997 max - Mar lastSun 2:30 1:00 S
Rule Kirgiz 1997 max - Oct lastSun 2:30 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bishkek 4:58:24 - LMT 1924 May 2
5:00 - FRUT 1930 Jun 21 # Frunze Time
6:00 RussiaAsia FRU%sT 1991 Mar 31 2:00s
5:00 1:00 FRUST 1991 Aug 31 2:00 # independence
5:00 Kirgiz KG%sT # Kirgizstan Time
###############################################################################
# Korea (North and South)
# From Guy Harris:
# According to someone at the Korean Times in San Francisco,
# Daylight Savings Time was not observed until 1987. He did not know
# at what time of day DST starts or ends.
# From Shanks:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule ROK 1960 only - May 15 0:00 1:00 D
Rule ROK 1960 only - Sep 13 0:00 0 S
Rule ROK 1987 1988 - May Sun<=14 0:00 1:00 D
Rule ROK 1987 1988 - Oct Sun<=14 0:00 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Seoul 8:27:52 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 ROK K%sT 1961 Aug 10
8:30 - KST 1968 Oct
9:00 ROK K%sT
Zone Asia/Pyongyang 8:23:00 - LMT 1890
8:30 - KST 1904 Dec
9:00 - KST 1928
8:30 - KST 1932
9:00 - KST 1954 Mar 21
8:00 - KST 1961 Aug 10
9:00 - KST
###############################################################################
# Kuwait
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuwait 3:11:56 - LMT 1950
3:00 - AST
# Laos
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Vientiane 6:50:24 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Lebanon
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Lebanon 1920 only - Mar 28 0:00 1:00 S
Rule Lebanon 1920 only - Oct 25 0:00 0 -
Rule Lebanon 1921 only - Apr 3 0:00 1:00 S
Rule Lebanon 1921 only - Oct 3 0:00 0 -
Rule Lebanon 1922 only - Mar 26 0:00 1:00 S
Rule Lebanon 1922 only - Oct 8 0:00 0 -
Rule Lebanon 1923 only - Apr 22 0:00 1:00 S
Rule Lebanon 1923 only - Sep 16 0:00 0 -
Rule Lebanon 1957 1961 - May 1 0:00 1:00 S
Rule Lebanon 1957 1961 - Oct 1 0:00 0 -
Rule Lebanon 1972 only - Jun 22 0:00 1:00 S
Rule Lebanon 1972 1977 - Oct 1 0:00 0 -
Rule Lebanon 1973 1977 - May 1 0:00 1:00 S
Rule Lebanon 1978 only - Apr 30 0:00 1:00 S
Rule Lebanon 1978 only - Sep 30 0:00 0 -
Rule Lebanon 1984 1987 - May 1 0:00 1:00 S
Rule Lebanon 1984 1991 - Oct 16 0:00 0 -
Rule Lebanon 1988 only - Jun 1 0:00 1:00 S
Rule Lebanon 1989 only - May 10 0:00 1:00 S
Rule Lebanon 1990 1992 - May 1 0:00 1:00 S
Rule Lebanon 1992 only - Oct 4 0:00 0 -
Rule Lebanon 1993 max - Mar lastSun 0:00 1:00 S
Rule Lebanon 1993 1998 - Sep lastSun 0:00 0 -
Rule Lebanon 1999 max - Oct lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Beirut 2:22:00 - LMT 1880
2:00 Lebanon EE%sT
# Malaysia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NBorneo 1935 1941 - Sep 14 0:00 0:20 TS
Rule NBorneo 1935 1941 - Dec 14 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Kuala_Lumpur 6:46:48 - LMT 1880
6:55:24 - SMT 1905 Jun # Singapore Mean Time
7:00 - MALT 1933 # Malaya Time
7:20 - MALT 1942 Feb 15
9:00 - JST 1945 Sep 2
7:20 - MALT 1950
7:30 - MALT 1982 May
8:00 - MYT # Malaysia Time
Zone Asia/Kuching 7:21:20 - LMT 1926 Mar
7:30 - BORT 1933 # Borneo Time
8:00 NBorneo BOR%sT 1942
9:00 - JST 1945 Sep 2
8:00 - BORT 1982 May
8:00 - MYT
# Maldives
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Maldives 4:54:00 - LMT 1880 # Male
4:54:00 - MMT 1960 # Male Mean Time
5:00 - MVT # Maldives Time
# Mongolia
# Shanks says that Mongolia has three time zones, but usno1995 and the CIA map
# Standard Time Zones of the World (1997-01)
# both say that it has just one.
# Let's comment out the western and eastern Mongolian time zones
# till we know what their principal towns are.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Mongol 1981 1984 - Apr 1 0:00 1:00 S
Rule Mongol 1981 1984 - Oct 1 0:00 0 -
Rule Mongol 1985 1990 - Mar lastSun 2:00 1:00 S
Rule Mongol 1985 1990 - Sep lastSun 3:00 0 -
Rule Mongol 1991 max - Mar lastSun 0:00 1:00 S
Rule Mongol 1991 1995 - Sep lastSun 0:00 0 -
# IATA SSIM (1996-09) says 1996-10-25; go with Shanks.
Rule Mongol 1996 only - Oct lastSun 0:00 0 -
Rule Mongol 1997 max - Sep lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#Zone Asia/Dariv 6:14:32 - LMT 1905 Aug
# 6:00 - DART 1978 # Dariv Time
# 7:00 Mongol DAR%sT
Zone Asia/Ulan_Bator 7:07:32 - LMT 1905 Aug
7:00 - ULAT 1978 # Ulan Bator Time
8:00 Mongol ULA%sT
#Zone Asia/Baruun-Urt 7:33:00 - LMT 1905 Aug
# 8:00 - BART 1978 # Baruun-Urt Time
# 9:00 Mongol BAR%sT
# Nepal
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Katmandu 5:41:16 - LMT 1920
5:30 - IST 1986
5:45 - NPT # Nepal Time
# Oman
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Muscat 3:54:20 - LMT 1920
4:00 - GST
# Pakistan
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Karachi 4:28:12 - LMT 1907
5:30 - IST 1942 Sep
5:30 1:00 IST 1945 Oct 15
5:30 - IST 1951 Sep 30
5:00 - KART 1971 Mar 26 # Karachi Time
5:00 - PKT # Pakistan Time
# Palestine
# From Amos Shapir <amos@nsof.co.il> (1998-02-15):
#
# From 1917 until 1948-05-15, all of Palestine, including the parts now
# known as the Gaza Strip and the West Bank, was under British rule.
# Therefore the rules given for Israel for that period, apply there too...
#
# The Gaza Strip was under Egyptian rule between 1948-05-15 until 1967-06-05
# (except a short occupation by Israel from 1956-11 till 1957-03, but no
# time zone was affected then). It was never formally annexed to Egypt,
# though.
#
# The rest of Palestine was under Jordanian rule at that time, formally
# annexed in 1950 as the West Bank (and the word "Trans" was dropped from
# the country's previous name of "the Hashemite Kingdom of the
# Trans-Jordan"). So the rules for Jordan for that time apply. Major
# towns in that area are Nablus (Shchem), El-Halil (Hebron), Ramallah, and
# East Jerusalem.
#
# Both areas were occupied by Israel in June 1967, but not annexed (except
# for East Jerusalem). They were on Israel time since then; there might
# have been a Military Governor's order about time zones, but I'm not aware
# of any (such orders may have been issued semi-annually whenever summer
# time was in effect, but maybe the legal aspect of time was just neglected).
#
# The Palestinian Authority was established in 1993, and got hold of most
# towns in the West Bank and Gaza by 1995. I know that in order to
# demonstrate...independence, they have been switching to
# summer time and back on a different schedule than Israel's, but I don't
# know when this was started, or what algorithm is used (most likely the
# Jordanian one).
#
# To summarize, the table should probably look something like that:
#
# Area \ when | 1918-1947 | 1948-1967 | 1967-1995 | 1996-
# ------------+-----------+-----------+-----------+-----------
# Israel | Zion | Zion | Zion | Zion
# West bank | Zion | Jordan | Zion | Jordan
# Gaza | Zion | Egypt | Zion | Jordan
#
# I guess more info may be available from the PA's web page (if/when they
# have one).
# From Paul Eggert (1998-02-25):
# Shanks writes that Gaza did not observe DST until 1957, but we'll go
# with Shapir and assume that it observed DST from 1940 through 1947,
# and that it used Jordanian rules starting in 1996.
# We don't yet need a separate entry for the West Bank, since
# the only differences between it and Gaza that we know about
# occurred before our cutoff date of 1970.
# However, as we get more information, we may need to add entries
# for parts of the West Bank as they transitioned from Israel's rules
# to Palestine's rules. If you have more info about this, please
# send it to tz@elsie.nci.nih.gov for incorporation into future editions.
# From IINS News Service - Israel - 1998-03-23 10:38:07 Israel time,
# forwarded by Ephraim Silverberg:
#
# Despite the fact that Israel changed over to daylight savings time
# last week, the PLO Authority (PA) has decided not to turn its clocks
# one-hour forward at this time. As a sign of independence from Israeli rule,
# the PA has decided to implement DST in April.
# From Paul Eggert (1999-09-20):
# Daoud Kuttab writes in
# <a href="http://www.jpost.com/com/Archive/22.Apr.1999/Opinion/Article-2.html">
# Holiday havoc
# </a> (Jerusalem Post, 1999-04-22) that
# the Palestinian National Authority changed to DST on 1999-04-15.
# I vaguely recall that they switch back in October (sorry, forgot the source).
# For now, let's assume that the spring switch was at 24:00,
# and that they switch at 0:00 on the 3rd Fridays of April and October.
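# (In zic notation, `Fri>=15' means the first Friday on or after the 15th,
# which is always the third Friday of the month, since the third Friday
# must fall on one of the days 15-21; that is how the `3rd Fridays' guess
# is encoded in the Palestine rules below.)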
# The rules for Egypt are stolen from the `africa' file.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule EgyptAsia 1957 only - May 10 0:00 1:00 S
Rule EgyptAsia 1957 1958 - Oct 1 0:00 0 -
Rule EgyptAsia 1958 only - May 1 0:00 1:00 S
Rule EgyptAsia 1959 1967 - May 1 1:00 1:00 S
Rule EgyptAsia 1959 1965 - Sep 30 3:00 0 -
Rule EgyptAsia 1966 only - Oct 1 3:00 0 -
Rule Palestine 1999 max - Apr Fri>=15 0:00 1:00 S
Rule Palestine 1999 max - Oct Fri>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Gaza 2:17:52 - LMT 1900 Oct
2:00 Zion EET 1948 May 15
2:00 EgyptAsia EE%sT 1967 Jun 5
2:00 Zion I%sT 1996
2:00 Jordan EE%sT 1999
2:00 Palestine EE%sT
# Paracel Is
# no information
# Philippines
# Howse writes (p 153) that until 1844 the Philippines kept American date.
# The rest of this data is from Shanks.
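# (The date change shows up as the two LMT lines below: dropping a day in
# 1844 advanced local time by 24 hours, and -15:56:00 + 24:00 = 8:04:00,
# the same local mean time expressed on the Asian side of the Date Line.)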
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Phil 1936 only - Nov 1 0:00 1:00 S
Rule Phil 1937 only - Feb 1 0:00 0 -
Rule Phil 1954 only - Apr 12 0:00 1:00 S
Rule Phil 1954 only - Jul 1 0:00 0 -
Rule Phil 1978 only - Mar 22 0:00 1:00 S
Rule Phil 1978 only - Sep 21 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Manila -15:56:00 - LMT 1844
8:04:00 - LMT 1899 May 11
8:00 Phil PH%sT 1942 May
9:00 - JST 1944 Nov
8:00 Phil PH%sT
# Qatar
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Qatar 3:26:08 - LMT 1920 # Al Dawhah
4:00 - GST 1972 Jun
3:00 - AST
# Saudi Arabia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Riyadh 3:06:52 - LMT 1950
3:00 - AST
# Singapore
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Singapore 6:55:24 - LMT 1880
6:55:24 - SMT 1905 Jun # Singapore Mean Time
7:00 - MALT 1933 # Malaya Time
7:20 - MALT 1942 Feb 15
9:00 - JST 1945 Sep 2
7:20 - MALT 1950
7:30 - MALT 1965 Aug 9 # independence
7:30 - SGT 1982 May # Singapore Time
8:00 - SGT
# Spratly Is
# no information
# Sri Lanka
# From Paul Eggert (1996-09-03):
# "Sri Lanka advances clock by an hour to avoid blackout"
# (www.virtual-pc.com/lankaweb/news/items/240596-2.html, 1996-05-24,
# no longer available as of 1999-08-17)
# reported ``the country's standard time will be put forward by one hour at
# midnight Friday (1830 GMT) `in the light of the present power crisis'.''
#
# From Dharmasiri Senanayake, Sri Lanka Media Minister (1996-10-24), as quoted
# by Shamindra in
# <a href="news:54rka5$m5h@mtinsc01-mgt.ops.worldnet.att.net">
# Daily News - Hot News Section (1996-10-26)
# </a>:
# With effect from 12.30 a.m. on 26th October 1996
# Sri Lanka will be six (06) hours ahead of GMT.
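# (Both quoted times appear to match the zone entry below: midnight at the
# old 5:30 offset is 18:30 GMT, giving the UNTIL column `1996 May 25 0:00',
# and 12.30 a.m. at the 6:30 offset is 18:00 GMT, giving `1996 Oct 26 0:30'.)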
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Colombo 5:19:24 - LMT 1880
5:19:32 - MMT 1906 # Moratuwa Mean Time
5:30 - IST 1942 Jan 5
5:30 0:30 IHST 1942 Sep
5:30 1:00 IST 1945 Oct 16 2:00
5:30 - IST 1996 May 25 0:00
6:30 - LKT 1996 Oct 26 0:30
6:00 - LKT
# Syria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Syria 1920 1923 - Apr Sun>=15 2:00 1:00 S
Rule Syria 1920 1923 - Oct Sun>=1 2:00 0 -
Rule Syria 1962 only - Apr 29 2:00 1:00 S
Rule Syria 1962 only - Oct 1 2:00 0 -
Rule Syria 1963 1965 - May 1 2:00 1:00 S
Rule Syria 1963 only - Sep 30 2:00 0 -
Rule Syria 1964 only - Oct 1 2:00 0 -
Rule Syria 1965 only - Sep 30 2:00 0 -
Rule Syria 1966 only - Apr 24 2:00 1:00 S
Rule Syria 1966 1976 - Oct 1 2:00 0 -
Rule Syria 1967 1978 - May 1 2:00 1:00 S
Rule Syria 1977 1978 - Sep 1 2:00 0 -
Rule Syria 1983 1984 - Apr 9 2:00 1:00 S
Rule Syria 1983 1984 - Oct 1 2:00 0 -
Rule Syria 1986 only - Feb 16 2:00 1:00 S
Rule Syria 1986 only - Oct 9 2:00 0 -
Rule Syria 1987 only - Mar 1 2:00 1:00 S
Rule Syria 1987 1988 - Oct 31 2:00 0 -
Rule Syria 1988 only - Mar 15 2:00 1:00 S
Rule Syria 1989 only - Mar 31 2:00 1:00 S
Rule Syria 1989 only - Oct 1 2:00 0 -
Rule Syria 1990 only - Apr 1 2:00 1:00 S
Rule Syria 1990 only - Sep 30 2:00 0 -
Rule Syria 1991 only - Apr 1 0:00 1:00 S
Rule Syria 1991 1992 - Oct 1 0:00 0 -
Rule Syria 1992 only - Apr 8 0:00 1:00 S
Rule Syria 1993 only - Mar 26 0:00 1:00 S
Rule Syria 1993 only - Sep 25 0:00 0 -
# IATA SSIM (1998-02) says 1998-04-02;
# (1998-09) says 1999-03-29 and 1999-09-29; (1999-02) says 1999-04-02,
# 2000-04-02, and 2001-04-02; ignore all these claims and go with Shanks.
Rule Syria 1994 1996 - Apr 1 0:00 1:00 S
Rule Syria 1994 max - Oct 1 0:00 0 -
Rule Syria 1997 1998 - Mar lastMon 0:00 1:00 S
Rule Syria 1999 max - Apr 1 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Damascus 2:25:12 - LMT 1920
2:00 Syria EE%sT
# Tajikistan
# From Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dushanbe 4:35:12 - LMT 1924 May 2
5:00 - DUST 1930 Jun 21 # Dushanbe Time
6:00 RussiaAsia DUS%sT 1991 Mar 31 2:00s
5:00 1:00 DUSST 1991 Sep 9 2:00s
5:00 - TJT # Tajikistan Time
# Thailand
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Bangkok 6:42:04 - LMT 1880
6:42:04 - BMT 1920 Apr # Bangkok Mean Time
7:00 - ICT
# Turkmenistan
# From Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Ashkhabad 3:53:32 - LMT 1924 May 2 # or Ashgabat
4:00 - ASHT 1930 Jun 21 # Ashkhabad Time
5:00 RussiaAsia ASH%sT 1991 Mar 31 2:00
4:00 RussiaAsia ASH%sT 1991 Oct 27 # independence
4:00 RussiaAsia TM%sT 1992 Jan 19 2:00
5:00 - TMT
# United Arab Emirates
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Dubai 3:41:12 - LMT 1920
4:00 - GST
# Uzbekistan
# 1991 transitions are from Shanks.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Samarkand 4:27:12 - LMT 1924 May 2
4:00 - SAMT 1930 Jun 21 # Samarkand Time
5:00 - SAMT 1981 Apr 1
5:00 1:00 SAMST 1981 Oct 1
6:00 RussiaAsia TAS%sT 1991 Mar 31 2:00 # Tashkent Time
5:00 RussiaAsia TAS%sT 1991 Sep 1 # independence
5:00 RussiaAsia UZ%sT 1992
5:00 RussiaAsia UZ%sT 1993
5:00 - UZT
Zone Asia/Tashkent 4:37:12 - LMT 1924 May 2
5:00 - TAST 1930 Jun 21 # Tashkent Time
6:00 RussiaAsia TAS%sT 1991 Mar 31 2:00s
5:00 RussiaAsia TAS%sT 1991 Sep 1 # independence
5:00 RussiaAsia UZ%sT 1992
5:00 RussiaAsia UZ%sT 1993
5:00 - UZT
# Vietnam
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# Saigon's official name is Thanh-Pho Ho Chi Minh, but it's too long.
# We'll stick with the traditional name for now.
# From Shanks:
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Saigon 7:06:40 - LMT 1906 Jun 9
7:06:20 - SMT 1911 Mar 11 0:01 # Saigon MT?
7:00 - ICT 1912 May
8:00 - ICT 1931 May
7:00 - ICT
# Yemen
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Asia/Aden 3:00:48 - LMT 1950
3:00 - AST
|
226,407,325,045,629 | # @(#)australasia 7.51
# This file also includes Pacific islands.
# Notes are at the end of this file
###############################################################################
# Australia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Shanks gives 1917 Jan 1 0:01; go with Whitman (and guess 2:00).
Rule Aus 1916 only - Oct 1 2:00 1:00 -
Rule Aus 1917 only - Mar 25 2:00 0 -
Rule Aus 1942 only - Jan 1 2:00 1:00 -
Rule Aus 1942 only - Mar 29 2:00 0 -
Rule Aus 1942 only - Sep 27 2:00 1:00 -
Rule Aus 1943 1944 - Mar lastSun 2:00 0 -
Rule Aus 1943 only - Oct 3 2:00 1:00 -
# Go with Whitman and the Australian National Standards Commission, which
# says W Australia didn't use DST in 1943/1944. Ignore Whitman's claim that
# 1944/1945 was just like 1943/1944.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Northern Territory
Zone Australia/Darwin 8:43:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 - CST 1917 Jan 1 0:01
9:30 Aus CST
# Western Australia
Zone Australia/Perth 7:43:24 - LMT 1895 Dec
8:00 - WST 1917 Jan 1 0:01
8:00 Aus WST 1943 Jul
8:00 - WST 1974 Oct lastSun 2:00s
8:00 1:00 WST 1975 Mar Sun>=1 2:00s
8:00 - WST 1983 Oct lastSun 2:00s
8:00 1:00 WST 1984 Mar Sun>=1 2:00s
8:00 - WST 1991 Nov 17 2:00s
8:00 1:00 WST 1992 Mar Sun>=1 2:00s
8:00 - WST
# Queensland
#
# From Alex Livingston <alex@agsm.unsw.edu.au> (1996-11-01):
# I have heard or read more than once that some resort islands off the coast
# of Queensland chose to keep observing daylight-saving time even after
# Queensland ceased to.
#
# From Paul Eggert (1996-11-22):
# IATA SSIM (1993-02/1994-09) say that the Holiday Islands (Hayman, Lindeman,
# Hamilton) observed DST for two years after the rest of Queensland stopped.
# Hamilton is the largest, but there is also a Hamilton in Victoria,
# so use Lindeman.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AQ 1971 only - Oct lastSun 2:00s 1:00 -
Rule AQ 1972 only - Feb lastSun 2:00s 0 -
Rule AQ 1989 1991 - Oct lastSun 2:00s 1:00 -
Rule AQ 1990 1992 - Mar Sun>=1 2:00s 0 -
Rule Holiday 1992 1993 - Oct lastSun 2:00s 1:00 -
Rule Holiday 1993 1994 - Mar Sun>=1 2:00s 0 -
Zone Australia/Brisbane 10:12:08 - LMT 1895
10:00 - EST 1917 Jan 1 0:01
10:00 Aus EST 1971
10:00 AQ EST
Zone Australia/Lindeman 9:55:56 - LMT 1895
10:00 - EST 1917 Jan 1 0:01
10:00 Aus EST 1971
10:00 AQ EST 1992 Jul
10:00 Holiday EST
# South Australia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AS 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AS 1986 only - Oct 19 2:00s 1:00 -
Rule AS 1987 max - Oct lastSun 2:00s 1:00 -
Rule AS 1972 only - Feb 27 2:00s 0 -
Rule AS 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AS 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule AS 1990 1994 even Mar Sun>=18 2:00s 0 -
Rule AS 1990 1994 odd Mar Sun>=1 2:00s 0 -
Rule AS 1995 max - Mar lastSun 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Adelaide 9:14:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 - CST 1917 Jan 1 0:01
9:30 Aus CST 1971
9:30 AS CST
# Tasmania
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AT 1967 only - Oct Sun>=1 2:00s 1:00 -
Rule AT 1968 only - Mar lastSun 2:00s 0 -
Rule AT 1968 1985 - Oct lastSun 2:00s 1:00 -
Rule AT 1969 1971 - Mar Sun>=8 2:00s 0 -
Rule AT 1972 only - Feb lastSun 2:00s 0 -
Rule AT 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AT 1982 1983 - Mar lastSun 2:00s 0 -
Rule AT 1984 1986 - Mar Sun>=1 2:00s 0 -
Rule AT 1986 only - Oct Sun>=15 2:00s 1:00 -
Rule AT 1987 1990 - Mar Sun>=15 2:00s 0 -
Rule AT 1987 only - Oct Sun>=22 2:00s 1:00 -
Rule AT 1988 1990 - Oct lastSun 2:00s 1:00 -
Rule AT 1991 max - Oct Sun>=1 2:00s 1:00 -
Rule AT 1991 max - Mar lastSun 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Hobart 9:49:16 - LMT 1895 Sep
10:00 - EST 1917 Jan 1 0:01
10:00 Aus EST 1967
10:00 AT EST
# Victoria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AV 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AV 1972 only - Feb lastSun 2:00s 0 -
Rule AV 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AV 1986 1990 - Mar Sun>=15 2:00s 0 -
Rule AV 1986 1987 - Oct Sun>=15 2:00s 1:00 -
Rule AV 1988 1999 - Oct lastSun 2:00s 1:00 -
Rule AV 1991 1994 - Mar Sun>=1 2:00s 0 -
Rule AV 1995 max - Mar lastSun 2:00s 0 -
Rule AV 2000 only - Aug lastSun 2:00s 1:00 -
Rule AV 2001 max - Oct lastSun 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Melbourne 9:39:52 - LMT 1895 Feb
10:00 - EST 1917 Jan 1 0:01
10:00 Aus EST 1971
10:00 AV EST
# New South Wales
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AN 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AN 1972 only - Feb 27 2:00s 0 -
Rule AN 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AN 1982 only - Apr Sun>=1 2:00s 0 -
Rule AN 1983 1985 - Mar Sun>=1 2:00s 0 -
Rule AN 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule AN 1986 only - Oct 19 2:00s 1:00 -
Rule AN 1987 1999 - Oct lastSun 2:00s 1:00 -
Rule AN 1990 1995 - Mar Sun>=1 2:00s 0 -
Rule AN 1996 max - Mar lastSun 2:00s 0 -
Rule AN 2000 only - Aug lastSun 2:00s 1:00 -
Rule AN 2001 max - Oct lastSun 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Sydney 10:04:52 - LMT 1895 Feb
10:00 - EST 1917 Jan 1 0:01
10:00 Aus EST 1971
10:00 AN EST
Zone Australia/Broken_Hill 9:25:48 - LMT 1895 Feb
10:00 - EST 1896 Aug 23
9:00 - CST 1899 May
9:30 - CST 1917 Jan 1 0:01
9:30 Aus CST 1971
9:30 AN CST 2000
9:30 AS CST
# Lord Howe Island
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule LH 1981 1984 - Oct lastSun 2:00s 1:00 -
Rule LH 1982 1985 - Mar Sun>=1 2:00s 0 -
Rule LH 1985 only - Oct lastSun 2:00s 0:30 -
Rule LH 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule LH 1986 only - Oct 19 2:00s 0:30 -
Rule LH 1987 max - Oct lastSun 2:00s 0:30 -
Rule LH 1990 1995 - Mar Sun>=1 2:00s 0 -
Rule LH 1996 max - Mar lastSun 2:00s 0 -
Zone Australia/Lord_Howe 10:36:20 - LMT 1895 Feb
10:00 - EST 1981 Mar
10:30 LH LHST
# Australian miscellany
#
# Ashmore Is, Cartier
# no indigenous inhabitants; only seasonal caretakers
# no information; probably like Australia/Perth
#
# Coral Sea Is
# no indigenous inhabitants; only meteorologists
# no information
#
# Macquarie
# permanent occupation (scientific station) since 1948;
# sealing and penguin oil station operated 1888/1917
# <a href="http://www.bom.gov.au/faq/faqgen.htm">
# The Australian Bureau of Meteorology FAQ
# </a> (1999-09-27) writes that Macquarie Island follows Tasmanian practice
# irrespective of any local use of DST. This is unclear; ignore it for now.
#
# Manihiki, Penrhyn, Rakehanga
# no information
# Christmas
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Christmas 7:02:52 - LMT 1895 Feb
7:00 - CXT # Christmas Island Time
# Cook Is
# From Shanks (1995):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cook 1978 only - Nov 12 0:00 0:30 HS
Rule Cook 1979 1991 - Mar Sun>=1 0:00 0 -
Rule Cook 1979 1990 - Oct lastSun 0:00 0:30 HS
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Rarotonga -10:39:04 - LMT 1901 # Avarua
-10:30 - CKT 1978 Nov 12 # Cook Is Time
-10:00 Cook CK%sT
# Cocos
# From USNO (1989):
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Cocos 6:30 - CCT # Cocos Islands Time
# Fiji
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Fiji 1998 max - Nov Sun>=1 2:00 1:00 S
Rule Fiji 1999 max - Feb lastSun 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fiji 11:53:40 - LMT 1915 Oct 26 # Suva
12:00 Fiji FJ%sT # Fiji Time
# French Polynesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Gambier -8:59:48 - LMT 1912 Oct # Rikitea
-9:00 - GAMT # Gambier Time
Zone Pacific/Marquesas -9:18:00 - LMT 1912 Oct
-9:30 - MART # Marquesas Time
Zone Pacific/Tahiti -9:58:16 - LMT 1912 Oct # Papeete
-10:00 - TAHT # Tahiti Time
# Clipperton (near North America) is administered from French Polynesia;
# it is uninhabited.
# Guam
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guam 9:39:00 - LMT 1901 # Agana
10:00 - GST
# Kiribati
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tarawa 11:32:04 - LMT 1901 # Bairiki
12:00 - GILT # Gilbert Is Time
Zone Pacific/Enderbury -11:24:20 - LMT 1901
-12:00 - PHOT 1979 Oct # Phoenix Is Time
-11:00 - PHOT 1995
13:00 - PHOT
Zone Pacific/Kiritimati -10:29:20 - LMT 1901
-10:40 - LINT 1979 Oct # Line Is Time
-10:00 - LINT 1995
14:00 - LINT
# N Mariana Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Saipan 9:43:00 - LMT 1901
9:00 - MPT 1969 Oct # N Mariana Is Time
10:00 - MPT
# Marshall Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Majuro 11:24:48 - LMT 1901
11:00 - MHT 1969 Oct # Marshall Islands Time
12:00 - MHT
Zone Pacific/Kwajalein 11:09:20 - LMT 1901
11:00 - MHT 1969 Oct
-12:00 - KWAT 1993 Aug 20 # Kwajalein Time
12:00 - MHT
# Micronesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Yap 9:12:32 - LMT 1901 # Colonia
9:00 - YAPT 1969 Oct # Yap Time
10:00 - YAPT
Zone Pacific/Truk 10:07:08 - LMT 1901
10:00 - TRUT # Truk Time
Zone Pacific/Ponape 10:32:52 - LMT 1901 # Kolonia
11:00 - PONT # Ponape Time
Zone Pacific/Kosrae 10:51:56 - LMT 1901
11:00 - PONT 1969 Oct
12:00 - KOST # Kosrae Time
# Nauru
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Nauru 11:07:40 - LMT 1921 Jan 15 # Uaobe
11:30 - NRT 1942 Mar 15 # Nauru Time
9:00 - JST 1944 Aug 15
11:30 - NRT 1979 May
12:00 - NRT
# New Caledonia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NC 1977 1978 - Dec Sun>=1 0:00 1:00 S
Rule NC 1978 1979 - Feb 27 0:00 0 -
Rule NC 1996 only - Dec 1 2:00s 1:00 S
Rule NC 1997 only - Mar 2 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Noumea 11:05:48 - LMT 1912 Jan 13
11:00 NC NC%sT
###############################################################################
# New Zealand
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Shanks gives 1927 Nov 6 - 1928 Mar 4, 1928 Oct 14 - 1929 Mar 17,
# 1929 Oct 13 - 1930 Mar 16; go with Whitman.
Rule NZ 1927 only - Nov 26 2:00 0:30 HD
Rule NZ 1928 1929 - Mar Sun>=1 2:00 0 S
Rule NZ 1928 only - Nov 4 2:00 0:30 HD
Rule NZ 1929 only - Oct 30 2:00 0:30 HD
Rule NZ 1930 1933 - Mar Sun>=15 2:00 0 S
Rule NZ 1930 1933 - Oct Sun>=8 2:00 0:30 HD
# Whitman says DST went on and off during war years, and the base UT offset
# didn't change until 1945 Apr 30; go with Shanks.
Rule NZ 1934 1940 - Apr lastSun 2:00 0 S
Rule NZ 1934 1939 - Sep lastSun 2:00 0:30 HD
Rule NZ 1974 only - Nov 3 2:00s 1:00 D
Rule NZ 1975 1988 - Oct lastSun 2:00s 1:00 D
Rule NZ 1989 only - Oct 8 2:00s 1:00 D
Rule NZ 1990 max - Oct Sun>=1 2:00s 1:00 D
Rule NZ 1975 only - Feb 23 2:00s 0 S
Rule NZ 1976 1989 - Mar Sun>=1 2:00s 0 S
Rule NZ 1990 max - Mar Sun>=15 2:00s 0 S
Rule Chatham 1990 max - Oct Sun>=1 2:45s 1:00 D
Rule Chatham 1991 max - Mar Sun>=15 2:45s 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Auckland 11:39:04 - LMT 1868
11:30 NZ NZ%sT 1940 Sep 29 2:00
12:00 NZ NZ%sT
Zone Pacific/Chatham 12:45 Chatham CHA%sT
# Auckland Is
# uninhabited; Maori and Moriori, colonial settlers, pastoralists, sealers,
# and scientific personnel have wintered
# Campbell I
# minor whaling stations operated 1909/1914
# scientific station operated 1941/1995;
# previously whalers, sealers, pastoralists, and scientific personnel wintered
# was probably like Pacific/Auckland
###############################################################################
# Niue
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Niue -11:19:40 - LMT 1901 # Alofi
-11:20 - NUT 1951 # Niue Time
-11:30 - NUT 1978 Oct 1
-11:00 - NUT
# Norfolk
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Norfolk 11:11:52 - LMT 1901 # Kingston
11:12 - NMT 1951 # Norfolk Mean Time
11:30 - NFT # Norfolk Time
# Palau
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror
9:00 - PWT # Palau Time
# Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Port_Moresby 9:48:40 - LMT 1880
9:48:40 - PMMT 1895 # Port Moresby Mean Time
10:00 - PGT # Papua New Guinea Time
# Pitcairn
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Pitcairn -8:40:20 - LMT 1901 # Adamstown
-8:30 - PNT # Pitcairn Time
# American Samoa
Zone Pacific/Pago_Pago 12:37:12 - LMT 1879 Jul 5
-11:22:48 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# W Samoa
Zone Pacific/Apia 12:33:04 - LMT 1879 Jul 5
-11:26:56 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - WST # W Samoa Time
# Solomon Is
# excludes Bougainville, for which see Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guadalcanal 10:39:48 - LMT 1912 Oct # Honiara
11:00 - SBT # Solomon Is Time
# Tokelau Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fakaofo -11:24:56 - LMT 1901
-10:00 - TKT # Tokelau Time
# Tonga
# Transition time is a guess--see below
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tonga 1999 max - Oct Sat>=1 0:00 1:00 S
Rule Tonga 2000 max - Apr Sat>=15 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tongatapu 12:19:20 - LMT 1901
12:20 - TOT 1941 # Tonga Time
13:00 - TOT 1999
13:00 Tonga TO%sT
# Tuvalu
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Funafuti 11:56:52 - LMT 1901
12:00 - TVT # Tuvalu Time
# US minor outlying islands
# Howland, Baker
# uninhabited since World War II
# no information; was probably like Pacific/Pago_Pago
# Jarvis
# uninhabited since 1958
# no information; was probably like Pacific/Kiritimati
# Johnston
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Johnston -10:00 - HST
# Kingman
# uninhabited
# Midway
Zone Pacific/Midway -11:49:28 - LMT 1901
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# Palmyra
# uninhabited since World War II; was probably like Pacific/Kiritimati
# Wake
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wake 11:06:28 - LMT 1901
12:00 - WAKT # Wake Time
# Vanuatu
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Vanuatu 1983 only - Sep 25 0:00 1:00 S
Rule Vanuatu 1984 1991 - Mar Sun>=23 0:00 0 -
Rule Vanuatu 1984 only - Oct 23 0:00 1:00 S
Rule Vanuatu 1985 1991 - Sep Sun>=23 0:00 1:00 S
Rule Vanuatu 1992 1993 - Jan Sun>=23 0:00 0 -
Rule Vanuatu 1992 only - Oct Sun>=23 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Efate 11:13:16 - LMT 1912 Jan 13 # Vila
11:00 Vanuatu VU%sT # Vanuatu Time
# Wallis and Futuna
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wallis 12:15:20 - LMT 1901
12:00 - WFT # Wallis & Futuna Time
###############################################################################
# NOTES
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-03-22):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (4th edition),
# San Diego: ACS Publications, Inc. (1995).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 8:00 WST WST Western Australia
# 9:00 JST Japan
# 9:30 CST CST Central Australia
# 10:00 EST EST Eastern Australia
# 10:00 GST Guam
# 10:30 LHST LHST Lord Howe*
# 12:00 NZST NZDT New Zealand
# 12:45 CHAST CHADT Chatham*
# -11:00 SST Samoa
# -10:00 HST Hawaii
#
# See the `northamerica' file for Hawaii.
# See the `southamerica' file for Easter I and the Galapagos Is.
###############################################################################
# Australia
# From John Mackin (1991-03-06):
# We in Australia have _never_ referred to DST as `daylight' time.
# It is called `summer' time. Now by a happy coincidence, `summer'
# and `standard' happen to start with the same letter; hence, the
# abbreviation does _not_ change...
# The legislation does not actually define abbreviations, at least
# in this State, but the abbreviation is just commonly taken to be the
# initials of the phrase, and the legislation here uniformly uses
# the phrase `summer time' and does not use the phrase `daylight
# time'.
# Announcers on the Commonwealth radio network, the ABC (for Australian
# Broadcasting Commission), use the phrases `Eastern Standard Time'
# or `Eastern Summer Time'. (Note, though, that as I say in the
# current australasia file, there is really no such thing.) Announcers
# on its overseas service, Radio Australia, use the same phrases
# prefixed by the word `Australian' when referring to local times;
# time announcements on that service, naturally enough, are made in UTC.
# From Arthur David Olson (1992-03-08):
# Given the above, what's chosen for year-round use is:
# CST for any place operating at a GMTOFF of 9:30
# WST for any place operating at a GMTOFF of 8:00
# EST for any place operating at a GMTOFF of 10:00
# From Paul Eggert (1995-12-19):
# Shanks reports 2:00 for all autumn changes in Australia and New Zealand.
# Mark Prior <mrp@itd.adelaide.edu.au> writes that his newspaper
# reports that NSW's fall 1995 change will occur at 2:00,
# but Robert Elz says it's been 3:00 in Victoria since 1970
# and perhaps the newspaper's `2:00' is referring to standard time.
# For now we'll continue to assume 2:00s for changes since 1960.
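# (Here and below, an `s' suffix on a time in the AT or UNTIL column means
# local standard time; an unsuffixed time is local wall-clock time, and a
# `u' suffix would mean UT.)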
# From Eric Ulevik <eau@zip.com.au> (1998-01-05):
#
# Here are some URLs to Australian time legislation. These URLs are stable,
# and should probably be included in the data file. There are probably more
# relevant entries in this database.
#
# NSW (including LHI and Broken Hill):
# <a href="http://www.austlii.edu.au/au/legis/nsw/consol_act/sta1987137/index.html">
# Standard Time Act 1987 (updated 1995-04-04)
# </a>
# ACT
# <a href="http://www.austlii.edu.au/au/legis/act/consol_act/stasta1972279/index.html">
# Standard Time and Summer Time Act 1972
# </a>
# SA
# <a href="http://www.austlii.edu.au/au/legis/sa/consol_act/sta1898137/index.html">
# Standard Time Act, 1898
# </a>
# Northern Territory
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The NORTHERN TERRITORY.. [ Courtesy N.T. Dept of the Chief Minister ]
# # [ Nov 1990 ]
# # N.T. have never utilised any DST due to sub-tropical/tropical location.
# ...
# Zone Australia/North 9:30 - CST
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# the Northern Territory do[es] not have daylight saving.
# Western Australia
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of WESTERN AUSTRALIA.. [ Courtesy W.A. dept Premier+Cabinet ]
# # [ Nov 1990 ]
# # W.A. suffers from a great deal of public and political opposition to
# # DST in principle. A bill is brought before parliament in most years, but
# # usually defeated either in the upper house, or in party caucus
# # before reaching parliament.
# ...
# Zone Australia/West 8:00 AW %sST
# ...
# Rule AW 1974 only - Oct lastSun 2:00 1:00 D
# Rule AW 1975 only - Mar Sun>=1 3:00 0 W
# Rule AW 1983 only - Oct lastSun 2:00 1:00 D
# Rule AW 1984 only - Mar Sun>=1 3:00 0 W
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# Western Australia...do[es] not have daylight saving.
# From John D. Newman via Bradley White (1991-11-02):
# Western Australia is still on "winter time". Some DH in Sydney
# rang me at home a few days ago at 6.00am. (He had just arrived at
# work at 9.00am.)
# W.A. is switching to Summer Time on Nov 17th just to confuse
# everybody again.
# From Arthur David Olson (1992-03-08):
# The 1992 ending date used in the rules is a best guess;
# it matches what was used in the past.
# <a href="http://www.bom.gov.au/faq/faqgen.htm">
# The Australian Bureau of Meteorology FAQ
# </a> (1999-09-27) writes that Giles Meteorological Station uses
# South Australian time even though it's located in Western Australia.
# Queensland
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of QUEENSLAND.. [ Courtesy Qld. Dept Premier Econ&Trade Devel ]
# # [ Dec 1990 ]
# ...
# Zone Australia/Queensland 10:00 AQ %sST
# ...
# Rule AQ 1971 only - Oct lastSun 2:00 1:00 D
# Rule AQ 1972 only - Feb lastSun 3:00 0 E
# Rule AQ 1989 max - Oct lastSun 2:00 1:00 D
# Rule AQ 1990 max - Mar Sun>=1 3:00 0 E
# From Bradley White (1989-12-24):
# "Australia/Queensland" now observes daylight time (i.e. from
# October 1989).
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...Queensland...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From John Mackin (1991-03-06):
# I can certainly confirm for my part that Daylight Saving in NSW did in fact
# end on Sunday, 3 March. I don't know at what hour, though. (It surprised
# me.)
# From Bradley White (1992-03-08):
# ...there was recently a referendum in Queensland which resulted
# in the experimental daylight saving system being abandoned. So, ...
# ...
# Rule QLD 1989 1991 - Oct lastSun 2:00 1:00 D
# Rule QLD 1990 1992 - Mar Sun>=1 3:00 0 S
# ...
# From Arthur David Olson (1992-03-08):
# The chosen rules are the union of the 1971/1972 change and the 1989-1992 changes.
# South Australia, Tasmania, Victoria
# From Arthur David Olson (1992-03-08):
# The rules from version 7.1 follow.
# There are lots of differences between these rules and
# the Shepherd et al. rules. Since the Shepherd et al. rules
# and Bradley White's newspaper article are in agreement on
# current DST ending dates, no worries.
#
# Rule Oz 1971 1985 - Oct lastSun 2:00 1:00 -
# Rule Oz 1986 max - Oct Sun<=24 2:00 1:00 -
# Rule Oz 1972 only - Feb 27 3:00 0 -
# Rule Oz 1973 1986 - Mar Sun>=1 3:00 0 -
# Rule Oz 1987 max - Mar Sun<=21 3:00 0 -
# Zone Australia/Tasmania 10:00 Oz EST
# Zone Australia/South 9:30 Oz CST
# Zone Australia/Victoria 10:00 Oz EST 1985 Oct lastSun 2:00
# 10:00 1:00 EST 1986 Mar Sun<=21 3:00
# 10:00 Oz EST
# From Robert Elz (1991-03-06):
# I believe that the current start date for DST is "lastSun" in Oct...
# that changed Oct 89. That is, we're back to the
# original rule, and that rule currently applies in all the states
# that have dst, incl Qld. (Certainly it was true in Vic).
# The file I'm including says that happened in 1988, I think
# that's incorrect, but I'm not 100% certain.
# South Australia
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...South Australia...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of SOUTH AUSTRALIA....[ Courtesy of S.A. Dept of Labour ]
# # [ Nov 1990 ]
# ...
# Zone Australia/South 9:30 AS %sST
# ...
# Rule AS 1971 max - Oct lastSun 2:00 1:00 D
# Rule AS 1972 1985 - Mar Sun>=1 3:00 0 C
# Rule AS 1986 1990 - Mar Sun<=21 3:00 0 C
# Rule AS 1991 max - Mar Sun>=1 3:00 0 C
# From Bradley White (1992-03-11):
# Recent correspondence with a friend in Adelaide
# contained the following exchange: "Due to the Adelaide Festival,
# South Australia delays setting back our clocks for a few weeks."
# From Robert Elz (1992-03-13):
# I heard that apparently (or at least, it appears that)
# South Aus will have an extra 3 weeks daylight saving every even
# numbered year (from 1990). That's when the Adelaide Festival
# is on...
# From Robert Elz (1992-03-16, 00:57:07 +1000):
# DST didn't end in Adelaide today (yesterday)....
# But whether it's "4th Sunday" or "2nd last Sunday" I have no idea whatever...
# (it's just as likely to be "the Sunday we pick for this year"...).
# From Bradley White (1994-04-11):
# If Sun, 15 March, 1992 was at +1030 as kre asserts, but yet Sun, 20 March,
# 1994 was at +0930 as John Connolly's customer seems to assert, then I can
# only conclude that the actual rule is more complicated....
# From John Warburton <jwarb@SACBH.com.au> (1994-10-07):
# The new Daylight Savings dates for South Australia ...
# was gazetted in the Government Hansard on Sep 26 1994....
# start on the last Sunday in October and end on the last Sunday in March.
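# (The even-year extension is what the `even' and `odd' entries in the
# TYPE column of Rule AS above encode; zic hands such nonstandard year
# types to its `yearistype' command to decide which years a rule matches.)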
# Tasmania
# The rules for 1967 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of TASMANIA.. [Courtesy Tasmanian Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# From Bill Hart via Guy Harris (1991-10-10):
# Oh yes, the new daylight savings rules are uniquely tasmanian, we have
# 6 weeks a year now when we are out of sync with the rest of Australia
# (but nothing new about that).
# Victoria
# The rules for 1971 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of VICTORIA.. [ Courtesy of Vic. Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# New South Wales
# From Arthur David Olson:
# New South Wales and subjurisdictions have their own ideas of a fun time.
# Based on law library research by John Mackin (john@basser.cs.su.oz),
# who notes:
# In Australia, time is not legislated federally, but rather by the
# individual states. Thus, while such terms as ``Eastern Standard Time''
# [I mean, of course, Australian EST, not any other kind] are in common
# use, _they have NO REAL MEANING_, as they are not defined in the
# legislation. This is very important to understand.
# I have researched New South Wales time only...
# From Paul Eggert (1999-09-27):
# The Information Service of the Australian National Standards Commission
# <a href="http://www.nsc.gov.au/InfoServ/Ileaflet/il27.htm">
# Daylight Saving
# </a> page (1995-04) has an excellent overall history of Australian DST.
# The Community Relations Division of the NSW Attorney General's Department
# publishes a history of daylight saving in NSW. See:
# <a href="http://www.lawlink.nsw.gov.au/crd.nsf/pages/time2">
# Lawlink NSW: Daylight Saving in New South Wales
# </a>
# From Eric Ulevik <eau@ozemail.com.au> (1999-05-26):
# DST will start in NSW in 2000 on the last Sunday of August, rather than
# the usual October. [See: Matthew Moore,
# <a href="http://www.smh.com.au/news/9905/26/pageone/pageone4.html">
# Two months more daylight saving
# </a>
# Sydney Morning Herald (1999-05-26).]
# From Paul Eggert (1999-09-27):
# See the following official NSW source:
# <a href="http://dir.gis.nsw.gov.au/cgi-bin/genobject/document/other/daylightsaving/tigGmZ">
# Daylight Saving in New South Wales.
# </a>
#
# Narrabri Shire (NSW) council has announced it will ignore the extension of
# daylight saving next year. See:
# <a href="http://abc.net.au/news/regionals/neweng/monthly/regeng-22jul1999-1.htm">
# Narrabri Council to ignore daylight saving
# </a> (1999-07-22). For now, we'll wait to see if this really happens.
#
# Victoria will follow NSW. See:
# <a href="http://abc.net.au/local/news/olympics/1999/07/item19990728112314_1.htm">
# Vic to extend daylight saving
# </a> (1999-07-28).
#
# However, South Australia rejected the DST request. See:
# <a href="http://abc.net.au/news/olympics/1999/07/item19990719151754_1.htm">
# South Australia rejects Olympics daylight savings request
# </a> (1999-07-19).
#
# Queensland also will not observe DST for the Olympics. See:
# <a href="http://abc.net.au/news/olympics/1999/06/item19990601114608_1.htm">
# Qld says no to daylight savings for Olympics
# </a> (1999-06-01), which quotes Queensland Premier Peter Beattie as saying
# ``Look you've got to remember in my family when this came up last time
# I voted for it, my wife voted against it and she said to me it's all very
# well for you, you don't have to worry about getting the children out of
# bed, getting them to school, getting them to sleep at night.
# I've been through all this argument domestically...my wife rules.''
#
# Broken Hill will stick with South Australian time in 2000. See:
# <a href="http://abc.net.au/news/regionals/brokenh/monthly/regbrok-21jul1999-6.htm">
# Broken Hill to be behind the times
# </a> (1999-07-21).
# IATA SSIM (1998-09) says that the spring 2000 change for Australian
# Capital Territory, New South Wales except Lord Howe Island and Broken
# Hill, and Victoria will be August 27, presumably due to the Sydney Olympics.
# Yancowinna
# From John Basser (1989-01-04):
# `Broken Hill' means the County of Yancowinna.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # YANCOWINNA.. [ Confirmation courtesy of Broken Hill Postmaster ]
# # [ Dec 1990 ]
# ...
# # Yancowinna uses Central Standard Time, despite [its] location on the
# # New South Wales side of the S.A. border. Most business and social dealings
# # are with CST zones, therefore CST is legislated by local government
# # although the switch to Summer Time occurs in line with N.S.W. There have
# # been years when this did not apply, but the historical data is not
# # presently available.
# Zone Australia/Yancowinna 9:30 AY %sST
# ...
# Rule AY 1971 1985 - Oct lastSun 2:00 1:00 D
# Rule AY 1972 only - Feb lastSun 3:00 0 C
# [followed by other Rules]
# Lord Howe Island
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# LHI... [ Courtesy of Pauline Van Winsen.. pauline@Aus ]
# [ Dec 1990 ]
# Lord Howe Island is located off the New South Wales coast, and is half an
# hour ahead of NSW time.
# From Paul Eggert (1995-12-19):
# For Lord Howe we use Shanks through 1991.
# Lord Howe is part of NSW, so we'll guess it has used the same transition
# times as NSW since 1991, even though Shanks writes that Lord Howe went
# with Victoria when NSW and Victoria disagreed in 1982.
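# (Note that the LH rules above use a SAVE of 0:30 from 1985 on, so in
# summer Lord Howe's 10:30 standard time advances to 11:00 and matches
# NSW; before 1985 the island used a full hour of DST.)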
###############################################################################
# New Zealand, from Elz' asia 1.1
# Elz says "no guarantees"
# From Mark Davies (1990-10-03):
# the 1989/90 year was a trial of an extended "daylight saving" period.
# This trial was deemed successful and the extended period adopted for
# subsequent years (with the addition of a further week at the start).
# source -- phone call to Ministry of Internal Affairs Head Office.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The Country of New Zealand (Australia's east island -) Gee they hate that!
# # or is Australia the west island of N.Z.
# # [ courtesy of Geoff Tribble.. Geofft@Aus.. Auckland N.Z. ]
# # [ Nov 1990 ]
# ...
# Rule NZ 1974 1988 - Oct lastSun 2:00 1:00 D
# Rule NZ 1989 max - Oct Sun>=1 2:00 1:00 D
# Rule NZ 1975 1989 - Mar Sun>=1 3:00 0 S
# Rule NZ 1990 max - Mar lastSun 3:00 0 S
# ...
# Zone NZ 12:00 NZ NZ%sT # New Zealand
# Zone NZ-CHAT 12:45 - NZ-CHAT # Chatham Island
# From Arthur David Olson (1992-03-08):
# The chosen rules use the Davies October 8 values for the start of DST in 1989
# rather than the October 1 value.
# From Paul Eggert (1995-12-19):
# Shanks reports 2:00 for all autumn changes in Australia and New Zealand.
# Robert Uzgalis <buz@cs.aukuni.ac.nz> writes that the New Zealand Daylight
# Savings Time Order in Council dated 1990-06-18 specifies 2:00 standard
# time on both the first Sunday in October and the third Sunday in March.
# As with Australia, we'll assume the tradition is 2:00s, not 2:00.
#
# From Paul Eggert (1996-11-22):
# Shanks gives no data for Chatham; usno1989 says it's +12:45,
# usno1995 says it's +12:45/+13:45, and IATA SSIM (1991/1996)
# gives the NZ rules but with transitions at 2:45 local standard time.
# Guess that they adopted DST in 1990.
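# (Hence the 2:45s in the Chatham rules: Chatham standard time runs 45
# minutes ahead of New Zealand's, so 2:45 standard time there coincides
# with the 2:00s transitions used for New Zealand proper.)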
###############################################################################
# Fiji
# Howse writes (p 153) that in 1879 the British governor of Fiji
# enacted an ordinance standardizing the islands on Antipodean Time
# instead of the American system (which was one day behind).
# From Rives McDow (1998-10-08):
# Fiji will introduce DST effective 0200 local time, 1998-11-01
# until 0300 local time 1999-02-28. Each year the DST period will
# be from the first Sunday in November until the last Sunday in February.
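# (McDow's dates match the Fiji rules above: 1998-11-01 was the first
# Sunday in November and 1999-02-28 the last Sunday in February, encoded
# as `Nov Sun>=1' at 2:00 and `Feb lastSun' at 3:00.)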
# From the BBC World Service (1998-10-31 11:32 UTC):
# The Fijian government says the main reason for the time change is to
# improve productivity and reduce road accidents. But correspondents say it
# also hopes the move will boost Fiji's ability to compete with other Pacific
# islands in the effort to attract tourists to witness the dawning of the new
# millennium.
# Johnston
# Johnston data is from usno1995.
# Kiribati
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (page 1) reports that Kiribati
# ``declared it the same day throughout the country as of Jan. 1, 1995''
# as part of the competition to be first into the 21st century.
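# (In this file the change shows up as the 1995 transitions above:
# Pacific/Enderbury jumped from -11:00 to +13:00 and Pacific/Kiritimati
# from -10:00 to +14:00, each an advance of exactly 24 hours, skipping a day.)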
# Kwajalein
# In comp.risks 14.87 (26 August 1993), Peter Neumann writes:
# I wonder what happened in Kwajalein, where there was NO Friday,
# 1993-08-20. Thursday night at midnight Kwajalein switched sides with
# respect to the International Date Line, to rejoin its fellow islands,
# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink.
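# (The Pacific/Kwajalein entry records this as the 1993 Aug 20 change from
# -12:00 to +12:00: local clocks moved ahead 24 hours, so that Friday never
# occurred there.)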
# N Mariana Is, Guam
# Howse writes (p 153) ``The Spaniards, on the other hand, reached the
# Philippines and the Ladrones from America,'' and implies that the Ladrones
# (now called the Marianas) kept American date for quite some time.
# Ignore this for now, as we have no hard data. See also Asia/Manila.
# Micronesia
# Alan Eugene Davis <adavis@kuentos.guam.net> writes (1996-03-16),
# ``I am certain, having lived there for the past decade, that "Truk"
# (now properly known as Chuuk) ... is in the time zone GMT+10.''
#
# Shanks writes that Truk switched from GMT+10 to GMT+11 on 1978-10-01;
# ignore this for now.
# Samoa
# Howse writes (p 153, citing p 10 of the 1883-11-18 New York Herald)
# that in 1879 the King of Samoa decided to change
# ``the date in his kingdom from the Antipodean to the American system,
# ordaining -- by a masterpiece of diplomatic flattery -- that
# the Fourth of July should be celebrated twice in that year.''
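# (Celebrating the day twice means repeating a day: at the 1879 Jul 5
# transitions above, Pacific/Apia's LMT offset falls from 12:33:04 to
# -11:26:56 and Pacific/Pago_Pago's from 12:37:12 to -11:22:48, each a
# setback of exactly 24 hours.)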
# Tonga
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (p 1) reports that ``Tonga has been plotting
# to sneak ahead of [New Zealanders] by introducing daylight-saving time.''
# Since Kiribati has moved the Date Line it's not clear what Tonga will do.
# Don Mundell writes in the 1997-02-20 Tonga Chronicle
# <a href="http://www.tongatapu.net.to/tonga/homeland/timebegins.htm">
# How Tonga became `The Land where Time Begins'
# </a>:
# Until 1941 Tonga maintained a standard time 50 minutes ahead of NZST,
# 12 hours and 20 minutes ahead of GMT. When New Zealand adjusted its
# standard time in the 1940s, Tonga had the choice of subtracting from its
# local time to come on the same standard time as New Zealand or of
# advancing its time to maintain the differential of 13 degrees
# (approximately 50 minutes ahead of New Zealand time).
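# (NZST was then GMT+11:30, and 11:30 + 0:50 = 12:20, the pre-1941 TOT
# offset in the Pacific/Tongatapu entry; advancing the clocks 40 minutes
# in 1941 gave 13:00, the offset still in use.)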
#
# Because His Majesty King Taufa'ahau Tupou IV, then Crown Prince
# Tungi, preferred to ensure Tonga's title as the land where time
# begins, the Legislative Assembly approved the latter change.
#
# But some of the older, more conservative members from the outer
# islands objected. "If at midnight on Dec. 31, we move ahead 40
# minutes, as your Royal Highness wishes, what becomes of the 40
# minutes we have lost?"
#
# The Crown Prince presented an unanswerable argument: "Remember that
# on the World Day of Prayer, you would be the first people on Earth
# to say your prayers in the morning."
# From Paul Eggert (1999-08-12):
# Shanks says the transition was on 1968-10-01; go with Mundell.
# From Eric Ulevik (1999-05-03):
# Tonga's director of tourism, who is also secretary of the National Millennium
# Committee, has a plan to get Tonga back in front.
# He has proposed a one-off move to tropical daylight saving for Tonga from
# October to March, which has won approval in principle from the Tongan
# Government.
# From Steffen Thorsen [straen@thorsen.priv.no] (1999-09-09):
# * Tonga will introduce DST in November
#
# I was given this link by John Letts <johnletts@earthlink.net>:
# <a href="http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm">
# http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm
# </a>
#
# I have not been able to find exact dates for the transition in November
# yet. By reading this article it seems like Fiji will be 14 hours ahead
# of UTC as well, but as far as I know Fiji will only be 13 hours ahead
# (12 + 1 hour DST).
# From Arthur David Olson [arthur_david_olson@nih.gov] (1999-09-20):
# According to <a href="http://www.tongaonline.com/news/sept1799.html">
# http://www.tongaonline.com/news/sept1799.html
# </a>:
# "Daylight Savings Time will take effect on Oct. 2 through April 15, 2000
# and annually thereafter from the first Saturday in October through the
# third Saturday of April. Under the system approved by Privy Council on
# Sept. 10, clocks must be turned ahead one hour on the opening day and
# set back an hour on the closing date."
# Alas, no indication of the time of day.
| # @(#)australasia 7.52
# This file also includes Pacific islands.
# Notes are at the end of this file
###############################################################################
# Australia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Aus 1917 only - Jan 1 0:01 1:00 -
Rule Aus 1917 only - Mar 25 2:00 0 -
Rule Aus 1942 only - Jan 1 2:00 1:00 -
Rule Aus 1942 only - Mar 29 2:00 0 -
Rule Aus 1942 only - Sep 27 2:00 1:00 -
Rule Aus 1943 1944 - Mar lastSun 2:00 0 -
Rule Aus 1943 only - Oct 3 2:00 1:00 -
# Go with Whitman and the Australian National Standards Commission, which
# says W Australia didn't use DST in 1943/1944. Ignore Whitman's claim that
# 1944/1945 was just like 1943/1944.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# Northern Territory
Zone Australia/Darwin 8:43:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 Aus CST
# Western Australia
Zone Australia/Perth 7:43:24 - LMT 1895 Dec
8:00 Aus WST 1943 Jul
8:00 - WST 1974 Oct lastSun 2:00s
8:00 1:00 WST 1975 Mar Sun>=1 2:00s
8:00 - WST 1983 Oct lastSun 2:00s
8:00 1:00 WST 1984 Mar Sun>=1 2:00s
8:00 - WST 1991 Nov 17 2:00s
8:00 1:00 WST 1992 Mar Sun>=1 2:00s
8:00 - WST
# Queensland
#
# From Alex Livingston <alex@agsm.unsw.edu.au> (1996-11-01):
# I have heard or read more than once that some resort islands off the coast
# of Queensland chose to keep observing daylight-saving time even after
# Queensland ceased to.
#
# From Paul Eggert (1996-11-22):
# IATA SSIM (1993-02/1994-09) say that the Holiday Islands (Hayman, Lindeman,
# Hamilton) observed DST for two years after the rest of Queensland stopped.
# Hamilton is the largest, but there is also a Hamilton in Victoria,
# so use Lindeman.
#
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AQ 1971 only - Oct lastSun 2:00s 1:00 -
Rule AQ 1972 only - Feb lastSun 2:00s 0 -
Rule AQ 1989 1991 - Oct lastSun 2:00s 1:00 -
Rule AQ 1990 1992 - Mar Sun>=1 2:00s 0 -
Rule Holiday 1992 1993 - Oct lastSun 2:00s 1:00 -
Rule Holiday 1993 1994 - Mar Sun>=1 2:00s 0 -
Zone Australia/Brisbane 10:12:08 - LMT 1895
10:00 Aus EST 1971
10:00 AQ EST
Zone Australia/Lindeman 9:55:56 - LMT 1895
10:00 Aus EST 1971
10:00 AQ EST 1992 Jul
10:00 Holiday EST
# South Australia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AS 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AS 1986 only - Oct 19 2:00s 1:00 -
Rule AS 1987 max - Oct lastSun 2:00s 1:00 -
Rule AS 1972 only - Feb 27 2:00s 0 -
Rule AS 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AS 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule AS 1990 1994 even Mar Sun>=18 2:00s 0 -
Rule AS 1990 1994 odd Mar Sun>=1 2:00s 0 -
Rule AS 1995 max - Mar lastSun 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Adelaide 9:14:20 - LMT 1895 Feb
9:00 - CST 1899 May
9:30 Aus CST 1971
9:30 AS CST
# Tasmania
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AT 1967 only - Oct Sun>=1 2:00s 1:00 -
Rule AT 1968 only - Mar lastSun 2:00s 0 -
Rule AT 1968 1985 - Oct lastSun 2:00s 1:00 -
Rule AT 1969 1971 - Mar Sun>=8 2:00s 0 -
Rule AT 1972 only - Feb lastSun 2:00s 0 -
Rule AT 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AT 1982 1983 - Mar lastSun 2:00s 0 -
Rule AT 1984 1986 - Mar Sun>=1 2:00s 0 -
Rule AT 1986 only - Oct Sun>=15 2:00s 1:00 -
Rule AT 1987 1990 - Mar Sun>=15 2:00s 0 -
Rule AT 1987 only - Oct Sun>=22 2:00s 1:00 -
Rule AT 1988 1990 - Oct lastSun 2:00s 1:00 -
Rule AT 1991 1999 - Oct Sun>=1 2:00s 1:00 -
Rule AT 1991 max - Mar lastSun 2:00s 0 -
Rule AT 2000 only - Aug lastSun 2:00s 1:00 -
Rule AT 2001 max - Oct Sun>=1 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Hobart 9:49:16 - LMT 1895 Sep
10:00 - EST 1916 Oct 1 2:00
10:00 1:00 EST 1917 Feb
10:00 Aus EST 1967
10:00 AT EST
# Victoria
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AV 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AV 1972 only - Feb lastSun 2:00s 0 -
Rule AV 1973 1985 - Mar Sun>=1 2:00s 0 -
Rule AV 1986 1990 - Mar Sun>=15 2:00s 0 -
Rule AV 1986 1987 - Oct Sun>=15 2:00s 1:00 -
Rule AV 1988 1999 - Oct lastSun 2:00s 1:00 -
Rule AV 1991 1994 - Mar Sun>=1 2:00s 0 -
Rule AV 1995 max - Mar lastSun 2:00s 0 -
Rule AV 2000 only - Aug lastSun 2:00s 1:00 -
Rule AV 2001 max - Oct lastSun 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Melbourne 9:39:52 - LMT 1895 Feb
10:00 Aus EST 1971
10:00 AV EST
# New South Wales
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule AN 1971 1985 - Oct lastSun 2:00s 1:00 -
Rule AN 1972 only - Feb 27 2:00s 0 -
Rule AN 1973 1981 - Mar Sun>=1 2:00s 0 -
Rule AN 1982 only - Apr Sun>=1 2:00s 0 -
Rule AN 1983 1985 - Mar Sun>=1 2:00s 0 -
Rule AN 1986 1989 - Mar Sun>=15 2:00s 0 -
Rule AN 1986 only - Oct 19 2:00s 1:00 -
Rule AN 1987 1999 - Oct lastSun 2:00s 1:00 -
Rule AN 1990 1995 - Mar Sun>=1 2:00s 0 -
Rule AN 1996 max - Mar lastSun 2:00s 0 -
Rule AN 2000 only - Aug lastSun 2:00s 1:00 -
Rule AN 2001 max - Oct lastSun 2:00s 1:00 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Australia/Sydney 10:04:52 - LMT 1895 Feb
10:00 Aus EST 1971
10:00 AN EST
Zone Australia/Broken_Hill 9:25:48 - LMT 1895 Feb
10:00 - EST 1896 Aug 23
9:00 - CST 1899 May
9:30 Aus CST 1971
9:30 AN CST 2000
9:30 AS CST
# Lord Howe Island
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule LH 1981 1984 - Oct lastSun 2:00s 1:00 -
Rule LH 1982 1985 - Mar Sun>=1 2:00s 0 -
Rule LH 1985 only - Oct lastSun 2:00s 0:30 -
Rule LH 1986 1991 - Mar Sun>=15 2:00s 0 -
Rule LH 1986 only - Oct 19 2:00s 0:30 -
Rule LH 1987 max - Oct lastSun 2:00s 0:30 -
Rule LH 1992 1995 - Mar Sun>=1 2:00s 0 -
Rule LH 1996 max - Mar lastSun 2:00s 0 -
Zone Australia/Lord_Howe 10:36:20 - LMT 1895 Feb
10:00 - EST 1981 Mar
10:30 LH LHST
# Australian miscellany
#
# Ashmore Is, Cartier
# no indigenous inhabitants; only seasonal caretakers
# no information; probably like Australia/Perth
#
# Coral Sea Is
# no indigenous inhabitants; only meteorologists
# no information
#
# Macquarie
# permanent occupation (scientific station) since 1948;
# sealing and penguin oil station operated 1888/1917
# <a href="http://www.bom.gov.au/faq/faqgen.htm">
# The Australian Bureau of Meteorology FAQ
# </a> (1999-09-27) writes that Macquarie Island follows Tasmanian practice
# irrespective of any local use of DST. This is unclear; ignore it for now.
#
# Manihiki, Penrhyn, Rakehanga
# no information
# Christmas
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Christmas 7:02:52 - LMT 1895 Feb
7:00 - CXT # Christmas Island Time
# Cook Is
# From Shanks:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cook 1978 only - Nov 12 0:00 0:30 HS
Rule Cook 1979 1991 - Mar Sun>=1 0:00 0 -
Rule Cook 1979 1990 - Oct lastSun 0:00 0:30 HS
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Rarotonga -10:39:04 - LMT 1901 # Avarua
-10:30 - CKT 1978 Nov 12 # Cook Is Time
-10:00 Cook CK%sT
# Cocos
# From USNO (1989):
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Indian/Cocos 6:30 - CCT # Cocos Islands Time
# Fiji
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Fiji 1998 max - Nov Sun>=1 2:00 1:00 S
Rule Fiji 1999 max - Feb lastSun 3:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fiji 11:53:40 - LMT 1915 Oct 26 # Suva
12:00 Fiji FJ%sT # Fiji Time
# French Polynesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Gambier -8:59:48 - LMT 1912 Oct # Rikitea
-9:00 - GAMT # Gambier Time
Zone Pacific/Marquesas -9:18:00 - LMT 1912 Oct
-9:30 - MART # Marquesas Time
Zone Pacific/Tahiti -9:58:16 - LMT 1912 Oct # Papeete
-10:00 - TAHT # Tahiti Time
# Clipperton (near North America) is administered from French Polynesia;
# it is uninhabited.
# Guam
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guam 9:39:00 - LMT 1901 # Agana
10:00 - GST
# Kiribati
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tarawa 11:32:04 - LMT 1901 # Bairiki
12:00 - GILT # Gilbert Is Time
Zone Pacific/Enderbury -11:24:20 - LMT 1901
-12:00 - PHOT 1979 Oct # Phoenix Is Time
-11:00 - PHOT 1995
13:00 - PHOT
Zone Pacific/Kiritimati -10:29:20 - LMT 1901
-10:40 - LINT 1979 Oct # Line Is Time
-10:00 - LINT 1995
14:00 - LINT
# N Mariana Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Saipan 9:43:00 - LMT 1901
9:00 - MPT 1969 Oct # N Mariana Is Time
10:00 - MPT
# Marshall Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Majuro 11:24:48 - LMT 1901
11:00 - MHT 1969 Oct # Marshall Islands Time
12:00 - MHT
Zone Pacific/Kwajalein 11:09:20 - LMT 1901
11:00 - MHT 1969 Oct
-12:00 - KWAT 1993 Aug 20 # Kwajalein Time
12:00 - MHT
# Micronesia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Yap 9:12:32 - LMT 1901 # Colonia
9:00 - YAPT 1969 Oct # Yap Time
10:00 - YAPT
Zone Pacific/Truk 10:07:08 - LMT 1901
10:00 - TRUT # Truk Time
Zone Pacific/Ponape 10:32:52 - LMT 1901 # Kolonia
11:00 - PONT # Ponape Time
Zone Pacific/Kosrae 10:51:56 - LMT 1901
11:00 - KOST 1969 Oct # Kosrae Time
12:00 - KOST 1999
11:00 - KOST
# Nauru
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Nauru 11:07:40 - LMT 1921 Jan 15 # Uaobe
11:30 - NRT 1942 Mar 15 # Nauru Time
9:00 - JST 1944 Aug 15
11:30 - NRT 1979 May
12:00 - NRT
# New Caledonia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule NC 1977 1978 - Dec Sun>=1 0:00 1:00 S
Rule NC 1978 1979 - Feb 27 0:00 0 -
Rule NC 1996 only - Dec 1 2:00s 1:00 S
# Shanks says the following was at 2:00; go with IATA.
Rule NC 1997 only - Mar 2 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Noumea 11:05:48 - LMT 1912 Jan 13
11:00 NC NC%sT
###############################################################################
# New Zealand
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Shanks gives 1927 Nov 6 - 1928 Mar 4, 1928 Oct 14 - 1929 Mar 17,
# 1929 Oct 13 - 1930 Mar 16; go with Whitman.
Rule NZ 1927 only - Nov 26 2:00 0:30 HD
Rule NZ 1928 1929 - Mar Sun>=1 2:00 0 S
Rule NZ 1928 only - Nov 4 2:00 0:30 HD
Rule NZ 1929 only - Oct 30 2:00 0:30 HD
Rule NZ 1930 1933 - Mar Sun>=15 2:00 0 S
Rule NZ 1930 1933 - Oct Sun>=8 2:00 0:30 HD
# Whitman says DST went on and off during war years, and the base UT offset
# didn't change until 1945 Apr 30; go with Shanks.
Rule NZ 1934 1940 - Apr lastSun 2:00 0 S
Rule NZ 1934 1939 - Sep lastSun 2:00 0:30 HD
Rule NZ 1974 only - Nov 3 2:00s 1:00 D
Rule NZ 1975 1988 - Oct lastSun 2:00s 1:00 D
Rule NZ 1989 only - Oct 8 2:00s 1:00 D
Rule NZ 1990 max - Oct Sun>=1 2:00s 1:00 D
Rule NZ 1975 only - Feb 23 2:00s 0 S
Rule NZ 1976 1989 - Mar Sun>=1 2:00s 0 S
Rule NZ 1990 max - Mar Sun>=15 2:00s 0 S
Rule Chatham 1990 max - Oct Sun>=1 2:45s 1:00 D
Rule Chatham 1991 max - Mar Sun>=15 2:45s 0 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Auckland 11:39:04 - LMT 1868
11:30 NZ NZ%sT 1940 Sep 29 2:00
12:00 NZ NZ%sT
Zone Pacific/Chatham 12:45 Chatham CHA%sT
# Auckland Is
# uninhabited; Maori and Moriori, colonial settlers, pastoralists, sealers,
# and scientific personnel have wintered
# Campbell I
# minor whaling stations operated 1909/1914
# scientific station operated 1941/1995;
# previously whalers, sealers, pastoralists, and scientific personnel wintered
# was probably like Pacific/Auckland
###############################################################################
# Niue
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Niue -11:19:40 - LMT 1901 # Alofi
-11:20 - NUT 1951 # Niue Time
-11:30 - NUT 1978 Oct 1
-11:00 - NUT
# Norfolk
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Norfolk 11:11:52 - LMT 1901 # Kingston
11:12 - NMT 1951 # Norfolk Mean Time
11:30 - NFT # Norfolk Time
# Palau
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Palau 8:57:56 - LMT 1901 # Koror
9:00 - PWT # Palau Time
# Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Port_Moresby 9:48:40 - LMT 1880
9:48:40 - PMMT 1895 # Port Moresby Mean Time
10:00 - PGT # Papua New Guinea Time
# Pitcairn
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Pitcairn -8:40:20 - LMT 1901 # Adamstown
-8:30 - PNT # Pitcairn Time
# American Samoa
Zone Pacific/Pago_Pago 12:37:12 - LMT 1879 Jul 5
-11:22:48 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# W Samoa
Zone Pacific/Apia 12:33:04 - LMT 1879 Jul 5
-11:26:56 - LMT 1911
-11:30 - SAMT 1950 # Samoa Time
-11:00 - WST # W Samoa Time
# Solomon Is
# excludes Bougainville, for which see Papua New Guinea
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Guadalcanal 10:39:48 - LMT 1912 Oct # Honiara
11:00 - SBT # Solomon Is Time
# Tokelau Is
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Fakaofo -11:24:56 - LMT 1901
-10:00 - TKT # Tokelau Time
# Tonga
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Tonga 1999 max - Oct Sat>=1 2:00s 1:00 S
Rule Tonga 2000 max - Apr Sun>=16 2:00s 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Tongatapu 12:19:20 - LMT 1901
12:20 - TOT 1941 # Tonga Time
13:00 - TOT 1999
13:00 Tonga TO%sT
# Tuvalu
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Funafuti 11:56:52 - LMT 1901
12:00 - TVT # Tuvalu Time
# US minor outlying islands
# Howland, Baker
# uninhabited since World War II
# no information; was probably like Pacific/Pago_Pago
# Jarvis
# uninhabited since 1958
# no information; was probably like Pacific/Kiritimati
# Johnston
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Johnston -10:00 - HST
# Kingman
# uninhabited
# Midway
Zone Pacific/Midway -11:49:28 - LMT 1901
-11:00 - NST 1967 Apr # N=Nome
-11:00 - BST 1983 Nov 30 # B=Bering
-11:00 - SST # S=Samoa
# Palmyra
# uninhabited since World War II; was probably like Pacific/Kiritimati
# Wake
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wake 11:06:28 - LMT 1901
12:00 - WAKT # Wake Time
# Vanuatu
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Vanuatu 1983 only - Sep 25 0:00 1:00 S
Rule Vanuatu 1984 1991 - Mar Sun>=23 0:00 0 -
Rule Vanuatu 1984 only - Oct 23 0:00 1:00 S
Rule Vanuatu 1985 1991 - Sep Sun>=23 0:00 1:00 S
Rule Vanuatu 1992 1993 - Jan Sun>=23 0:00 0 -
Rule Vanuatu 1992 only - Oct Sun>=23 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Efate 11:13:16 - LMT 1912 Jan 13 # Vila
11:00 Vanuatu VU%sT # Vanuatu Time
# Wallis and Futuna
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Pacific/Wallis 12:15:20 - LMT 1901
12:00 - WFT # Wallis & Futuna Time
###############################################################################
# NOTES
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-10-29):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (5th edition),
# San Diego: ACS Publications, Inc. (1999).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Another source occasionally used is Edward W. Whitman, World Time Differences,
# Whitman Publishing Co, 2 Niagara Av, Ealing, London (undated), which
# I found in the UCLA library.
#
# A reliable and entertaining source about time zones is
# Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
#
# I invented the abbreviations marked `*' in the following table;
# the rest are from earlier versions of this file, or from other sources.
# Corrections are welcome!
# std dst
# LMT Local Mean Time
# 8:00 WST WST Western Australia
# 9:00 JST Japan
# 9:30 CST CST Central Australia
# 10:00 EST EST Eastern Australia
# 10:00 GST Guam
# 10:30 LHST LHST Lord Howe*
# 12:00 NZST NZDT New Zealand
# 12:45 CHAST CHADT Chatham*
# -11:00 SST Samoa
# -10:00 HST Hawaii
#
# See the `northamerica' file for Hawaii.
# See the `southamerica' file for Easter I and the Galapagos Is.
###############################################################################
# Australia
# From John Mackin (1991-03-06):
# We in Australia have _never_ referred to DST as `daylight' time.
# It is called `summer' time. Now by a happy coincidence, `summer'
# and `standard' happen to start with the same letter; hence, the
# abbreviation does _not_ change...
# The legislation does not actually define abbreviations, at least
# in this State, but the abbreviation is just commonly taken to be the
# initials of the phrase, and the legislation here uniformly uses
# the phrase `summer time' and does not use the phrase `daylight
# time'.
# Announcers on the Commonwealth radio network, the ABC (for Australian
# Broadcasting Commission), use the phrases `Eastern Standard Time'
# or `Eastern Summer Time'. (Note, though, that as I say in the
# current australasia file, there is really no such thing.) Announcers
# on its overseas service, Radio Australia, use the same phrases
# prefixed by the word `Australian' when referring to local times;
# time announcements on that service, naturally enough, are made in UTC.
# From Arthur David Olson (1992-03-08):
# Given the above, what's chosen for year-round use is:
# CST for any place operating at a GMTOFF of 9:30
# WST for any place operating at a GMTOFF of 8:00
# EST for any place operating at a GMTOFF of 10:00
# From Paul Eggert (1995-12-19):
# Shanks reports 2:00 for all autumn changes in Australia and New Zealand.
# Mark Prior <mrp@itd.adelaide.edu.au> writes that his newspaper
# reports that NSW's fall 1995 change will occur at 2:00,
# but Robert Elz says it's been 3:00 in Victoria since 1970
# and perhaps the newspaper's `2:00' is referring to standard time.
# For now we'll continue to assume 2:00s for changes since 1960.
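# An AT field of "2:00s" means 2:00 local standard time, so while DST is
# in effect the wall clock reads 3:00; that reconciles the newspaper's
# 2:00 with Elz's 3:00. A rough Python sketch of the conversion, with
# invented helper and argument names (this is not zic's code):

def at_to_utc_minutes(at, utoff_min, dst_min):
    """Transition instant in minutes relative to UTC midnight."""
    suffix = at[-1] if at[-1].isalpha() else ""
    hh, mm = map(int, at.rstrip("swugz").split(":"))
    local = 60 * hh + mm
    if suffix == "s":                   # local standard time
        return local - utoff_min
    if suffix in ("u", "g", "z"):       # universal time
        return local
    return local - utoff_min - dst_min  # wall clock ("w" or no suffix)

# Victoria, UT offset +10:00, leaving DST: 2:00s and 3:00 wall coincide.
print(at_to_utc_minutes("2:00s", 600, 60))  # -480, i.e. 16:00 UT the day before
print(at_to_utc_minutes("3:00", 600, 60))   # -480 as well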
# From Eric Ulevik <eau@zip.com.au> (1998-01-05):
#
# Here are some URLs to Australian time legislation. These URLs are stable,
# and should probably be included in the data file. There are probably more
# relevant entries in this database.
#
# NSW (including LHI and Broken Hill):
# <a href="http://www.austlii.edu.au/au/legis/nsw/consol_act/sta1987137/index.html">
# Standard Time Act 1987 (updated 1995-04-04)
# </a>
# ACT
# <a href="http://www.austlii.edu.au/au/legis/act/consol_act/stasta1972279/index.html">
# Standard Time and Summer Time Act 1972
# </a>
# SA
# <a href="http://www.austlii.edu.au/au/legis/sa/consol_act/sta1898137/index.html">
# Standard Time Act, 1898
# </a>
# Northern Territory
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The NORTHERN TERRITORY.. [ Courtesy N.T. Dept of the Chief Minister ]
# # [ Nov 1990 ]
# # N.T. have never utilised any DST due to sub-tropical/tropical location.
# ...
# Zone Australia/North 9:30 - CST
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# the Northern Territory do[es] not have daylight saving.
# Western Australia
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of WESTERN AUSTRALIA.. [ Courtesy W.A. dept Premier+Cabinet ]
# # [ Nov 1990 ]
# # W.A. suffers from a great deal of public and political opposition to
# # DST in principle. A bill is brought before parliament in most years, but
# # usually defeated either in the upper house, or in party caucus
# # before reaching parliament.
# ...
# Zone Australia/West 8:00 AW %sST
# ...
# Rule AW 1974 only - Oct lastSun 2:00 1:00 D
# Rule AW 1975 only - Mar Sun>=1 3:00 0 W
# Rule AW 1983 only - Oct lastSun 2:00 1:00 D
# Rule AW 1984 only - Mar Sun>=1 3:00 0 W
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# Western Australia...do[es] not have daylight saving.
# From John D. Newman via Bradley White (1991-11-02):
# Western Australia is still on "winter time". Some DH in Sydney
# rang me at home a few days ago at 6.00am. (He had just arrived at
# work at 9.00am.)
# W.A. is switching to Summer Time on Nov 17th just to confuse
# everybody again.
# From Arthur David Olson (1992-03-08):
# The 1992 ending date used in the rules is a best guess;
# it matches what was used in the past.
# <a href="http://www.bom.gov.au/faq/faqgen.htm">
# The Australian Bureau of Meteorology FAQ
# </a> (1999-09-27) writes that Giles Meteorological Station uses
# South Australian time even though it's located in Western Australia.
# Queensland
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of QUEENSLAND.. [ Courtesy Qld. Dept Premier Econ&Trade Devel ]
# # [ Dec 1990 ]
# ...
# Zone Australia/Queensland 10:00 AQ %sST
# ...
# Rule AQ 1971 only - Oct lastSun 2:00 1:00 D
# Rule AQ 1972 only - Feb lastSun 3:00 0 E
# Rule AQ 1989 max - Oct lastSun 2:00 1:00 D
# Rule AQ 1990 max - Mar Sun>=1 3:00 0 E
# From Bradley White (1989-12-24):
# "Australia/Queensland" now observes daylight time (i.e. from
# October 1989).
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...Queensland...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From John Mackin (1991-03-06):
# I can certainly confirm for my part that Daylight Saving in NSW did in fact
# end on Sunday, 3 March. I don't know at what hour, though. (It surprised
# me.)
# From Bradley White (1992-03-08):
# ...there was recently a referendum in Queensland which resulted
# in the experimental daylight saving system being abandoned. So, ...
# ...
# Rule QLD 1989 1991 - Oct lastSun 2:00 1:00 D
# Rule QLD 1990 1992 - Mar Sun>=1 3:00 0 S
# ...
# From Arthur David Olson (1992-03-08):
# The chosen rules are the union of the 1971/1972 change and the 1989-1992 changes.
# South Australia, Tasmania, Victoria
# From Arthur David Olson (1992-03-08):
# The rules from version 7.1 follow.
# There are lots of differences between these rules and
# the Shepherd et al. rules. Since the Shepherd et al. rules
# and Bradley White's newspaper article are in agreement on
# current DST ending dates, no worries.
#
# Rule Oz 1971 1985 - Oct lastSun 2:00 1:00 -
# Rule Oz 1986 max - Oct Sun<=24 2:00 1:00 -
# Rule Oz 1972 only - Feb 27 3:00 0 -
# Rule Oz 1973 1986 - Mar Sun>=1 3:00 0 -
# Rule Oz 1987 max - Mar Sun<=21 3:00 0 -
# Zone Australia/Tasmania 10:00 Oz EST
# Zone Australia/South 9:30 Oz CST
# Zone Australia/Victoria 10:00 Oz EST 1985 Oct lastSun 2:00
# 10:00 1:00 EST 1986 Mar Sun<=21 3:00
# 10:00 Oz EST
# From Robert Elz (1991-03-06):
# I believe that the current start date for DST is "lastSun" in Oct...
# that changed Oct 89. That is, we're back to the
# original rule, and that rule currently applies in all the states
# that have dst, incl Qld. (Certainly it was true in Vic).
# The file I'm including says that happened in 1988, I think
# that's incorrect, but I'm not 100% certain.
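# ON specifications such as "lastSun", "Sun>=1", or "Sun<=21" select a
# day within the month. A hypothetical Python helper showing how they
# resolve (the same arithmetic, not zic's implementation):

import calendar

def resolve_on(year, month, on):
    sundays = [d.day for d in calendar.Calendar().itermonthdates(year, month)
               if d.month == month and d.weekday() == 6]
    if on == "lastSun":
        return sundays[-1]
    if on.startswith("Sun>="):
        return min(d for d in sundays if d >= int(on[5:]))
    if on.startswith("Sun<="):
        return max(d for d in sundays if d <= int(on[5:]))
    return int(on)  # a bare day-of-month number

print(resolve_on(1991, 10, "lastSun"))  # 27; 1991-10-27 was a Sunday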
# South Australia
# From Bradley White (1991-03-04):
# A recent excerpt from an Australian newspaper...
# ...South Australia...[has] agreed to end daylight saving
# at 3am tomorrow (March 3)...
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of SOUTH AUSTRALIA....[ Courtesy of S.A. Dept of Labour ]
# # [ Nov 1990 ]
# ...
# Zone Australia/South 9:30 AS %sST
# ...
# Rule AS 1971 max - Oct lastSun 2:00 1:00 D
# Rule AS 1972 1985 - Mar Sun>=1 3:00 0 C
# Rule AS 1986 1990 - Mar Sun<=21 3:00 0 C
# Rule AS 1991 max - Mar Sun>=1 3:00 0 C
# From Bradley White (1992-03-11):
# Recent correspondence with a friend in Adelaide
# contained the following exchange: "Due to the Adelaide Festival,
# South Australia delays setting back our clocks for a few weeks."
# From Robert Elz (1992-03-13):
# I heard that apparently (or at least, it appears that)
# South Aus will have an extra 3 weeks daylight saving every even
# numbered year (from 1990). That's when the Adelaide Festival
# is on...
# From Robert Elz (1992-03-16, 00:57:07 +1000):
# DST didn't end in Adelaide today (yesterday)....
# But whether it's "4th Sunday" or "2nd last Sunday" I have no idea whatever...
# (it's just as likely to be "the Sunday we pick for this year"...).
# From Bradley White (1994-04-11):
# If Sun, 15 March, 1992 was at +1030 as kre asserts, but yet Sun, 20 March,
# 1994 was at +0930 as John Connolly's customer seems to assert, then I can
# only conclude that the actual rule is more complicated....
# From John Warburton <jwarb@SACBH.com.au> (1994-10-07):
# The new Daylight Savings dates for South Australia ...
# were gazetted in the Government Hansard on Sep 26 1994....
# start on the last Sunday in October and end on the last Sunday in March.
# Tasmania
# The rules for 1967 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of TASMANIA.. [Courtesy Tasmanian Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# From Bill Hart via Guy Harris (1991-10-10):
# Oh yes, the new daylight savings rules are uniquely tasmanian, we have
# 6 weeks a year now when we are out of sync with the rest of Australia
# (but nothing new about that).
# From Alex Livingston (1999-10-04):
# I heard on the ABC (Australian Broadcasting Corporation) radio news on the
# (long) weekend that Tasmania, which usually goes its own way in this regard,
# has decided to join with most of NSW, the ACT, and most of Victoria
# (Australia) and start daylight saving on the last Sunday in August in 2000
# instead of the first Sunday in October.
# Victoria
# The rules for 1971 through 1991 were reported by George Shepherd
# via Simon Woodhead via Robert Elz (1991-03-06):
# # The state of VICTORIA.. [ Courtesy of Vic. Dept of Premier + Cabinet ]
# # [ Nov 1990 ]
# New South Wales
# From Arthur David Olson:
# New South Wales and subjurisdictions have their own ideas of a fun time.
# Based on law library research by John Mackin (john@basser.cs.su.oz),
# who notes:
# In Australia, time is not legislated federally, but rather by the
# individual states. Thus, while such terms as ``Eastern Standard Time''
# [I mean, of course, Australian EST, not any other kind] are in common
# use, _they have NO REAL MEANING_, as they are not defined in the
# legislation. This is very important to understand.
# I have researched New South Wales time only...
# From Paul Eggert (1999-09-27):
# The Information Service of the Australian National Standards Commission
# <a href="http://www.nsc.gov.au/InfoServ/Ileaflet/il27.htm">
# Daylight Saving
# </a> page (1995-04) has an excellent overall history of Australian DST.
# The Community Relations Division of the NSW Attorney General's Department
# publishes a history of daylight saving in NSW. See:
# <a href="http://www.lawlink.nsw.gov.au/crd.nsf/pages/time2">
# Lawlink NSW: Daylight Saving in New South Wales
# </a>
# From Eric Ulevik <eau@ozemail.com.au> (1999-05-26):
# DST will start in NSW on the last Sunday of August, rather than the usual
# October in 2000. [See: Matthew Moore,
# <a href="http://www.smh.com.au/news/9905/26/pageone/pageone4.html">
# Two months more daylight saving
# </a>
# Sydney Morning Herald (1999-05-26).]
# From Paul Eggert (1999-09-27):
# See the following official NSW source:
# <a href="http://dir.gis.nsw.gov.au/cgi-bin/genobject/document/other/daylightsaving/tigGmZ">
# Daylight Saving in New South Wales.
# </a>
#
# Narrabri Shire (NSW) council has announced it will ignore the extension of
# daylight saving next year. See:
# <a href="http://abc.net.au/news/regionals/neweng/monthly/regeng-22jul1999-1.htm">
# Narrabri Council to ignore daylight saving
# </a> (1999-07-22). For now, we'll wait to see if this really happens.
#
# Victoria will follow NSW. See:
# <a href="http://abc.net.au/local/news/olympics/1999/07/item19990728112314_1.htm">
# Vic to extend daylight saving
# </a> (1999-07-28).
#
# However, South Australia rejected the DST request. See:
# <a href="http://abc.net.au/news/olympics/1999/07/item19990719151754_1.htm">
# South Australia rejects Olympics daylight savings request
# </a> (1999-07-19).
#
# Queensland also will not observe DST for the Olympics. See:
# <a href="http://abc.net.au/news/olympics/1999/06/item19990601114608_1.htm">
# Qld says no to daylight savings for Olympics
# </a> (1999-06-01), which quotes Queensland Premier Peter Beattie as saying
# ``Look you've got to remember in my family when this came up last time
# I voted for it, my wife voted against it and she said to me it's all very
# well for you, you don't have to worry about getting the children out of
# bed, getting them to school, getting them to sleep at night.
# I've been through all this argument domestically...my wife rules.''
#
# Broken Hill will stick with South Australian time in 2000. See:
# <a href="http://abc.net.au/news/regionals/brokenh/monthly/regbrok-21jul1999-6.htm">
# Broken Hill to be behind the times
# </a> (1999-07-21).
# IATA SSIM (1998-09) says that the spring 2000 change for Australian
# Capital Territory, New South Wales except Lord Howe Island and Broken
# Hill, and Victoria will be August 27, presumably due to the Sydney Olympics.
# Yancowinna
# From John Basser (1989-01-04):
# `Broken Hill' means the County of Yancowinna.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # YANCOWINNA.. [ Confirmation courtesy of Broken Hill Postmaster ]
# # [ Dec 1990 ]
# ...
# # Yancowinna uses Central Standard Time, despite [its] location on the
# # New South Wales side of the S.A. border. Most business and social dealings
# # are with CST zones, therefore CST is legislated by local government
# # although the switch to Summer Time occurs in line with N.S.W. There have
# # been years when this did not apply, but the historical data is not
# # presently available.
# Zone Australia/Yancowinna 9:30 AY %sST
# ...
# Rule AY 1971 1985 - Oct lastSun 2:00 1:00 D
# Rule AY 1972 only - Feb lastSun 3:00 0 C
# [followed by other Rules]
# Lord Howe Island
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# LHI... [ Courtesy of Pauline Van Winsen.. pauline@Aus ]
# [ Dec 1990 ]
# Lord Howe Island is located off the New South Wales coast, and is half an
# hour ahead of NSW time.
# From Paul Eggert (1995-12-19):
# For Lord Howe we use Shanks through 1991.
# Lord Howe is part of NSW, so we'll guess it has used the same transition
# times as NSW since 1991, even though Shanks writes that Lord Howe went
# with Victoria when NSW and Victoria disagreed in 1982.
###############################################################################
# New Zealand, from Elz' asia 1.1
# Elz says "no guarantees"
# From Mark Davies (1990-10-03):
# the 1989/90 year was a trial of an extended "daylight saving" period.
# This trial was deemed successful and the extended period adopted for
# subsequent years (with the addition of a further week at the start).
# source -- phone call to Ministry of Internal Affairs Head Office.
# From George Shepherd via Simon Woodhead via Robert Elz (1991-03-06):
# # The Country of New Zealand (Australia's east island -) Gee they hate that!
# # or is Australia the west island of N.Z.
# # [ courtesy of Geoff Tribble.. Geofft@Aus.. Auckland N.Z. ]
# # [ Nov 1990 ]
# ...
# Rule NZ 1974 1988 - Oct lastSun 2:00 1:00 D
# Rule NZ 1989 max - Oct Sun>=1 2:00 1:00 D
# Rule NZ 1975 1989 - Mar Sun>=1 3:00 0 S
# Rule NZ 1990 max - Mar lastSun 3:00 0 S
# ...
# Zone NZ 12:00 NZ NZ%sT # New Zealand
# Zone NZ-CHAT 12:45 - NZ-CHAT # Chatham Island
# From Arthur David Olson (1992-03-08):
# The chosen rules use the Davies October 8 values for the start of DST in 1989
# rather than the October 1 value.
# From Paul Eggert (1995-12-19):
# Shanks reports 2:00 for all autumn changes in Australia and New Zealand.
# Robert Uzgalis <buz@cs.aukuni.ac.nz> writes that the New Zealand Daylight
# Savings Time Order in Council dated 1990-06-18 specifies 2:00 standard
# time on both the first Sunday in October and the third Sunday in March.
# As with Australia, we'll assume the tradition is 2:00s, not 2:00.
#
# From Paul Eggert (1999-10-29):
# Shanks gives no time data for Chatham; usno1989 says it's +12:45,
# usno1995 says it's +12:45/+13:45, and IATA SSIM (1991/1999)
# gives the NZ rules but with transitions at 2:45 local standard time.
# Guess that they have been in lock-step with NZ since 1990.
###############################################################################
# Fiji
# Howse writes (p 153) that in 1879 the British governor of Fiji
# enacted an ordinance standardizing the islands on Antipodean Time
# instead of the American system (which was one day behind).
# From Rives McDow (1998-10-08):
# Fiji will introduce DST effective 0200 local time, 1998-11-01
# until 0300 local time 1999-02-28. Each year the DST period will
# be from the first Sunday in November until the last Sunday in February.
# From the BBC World Service (1998-10-31 11:32 UTC):
# The Fijian government says the main reasons for the time change are to
# improve productivity and reduce road accidents. But correspondents say it
# also hopes the move will boost Fiji's ability to compete with other pacific
# islands in the effort to attract tourists to witness the dawning of the new
# millennium.
# Johnston
# Johnston data is from usno1995.
# Kiribati
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (page 1) reports that Kiribati
# ``declared it the same day throughout the country as of Jan. 1, 1995''
# as part of the competition to be first into the 21st century.
# Kwajalein
# In comp.risks 14.87 (1993-08-26), Peter Neumann writes:
# I wonder what happened in Kwajalein, where there was NO Friday,
# 1993-08-20. Thursday night at midnight Kwajalein switched sides with
# respect to the International Date Line, to rejoin its fellow islands,
# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink.
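# Crossing the date line is simply a 24-hour jump in UT offset: the same
# instants get relabeled one calendar day later. Illustrative offset
# arithmetic only (the zone's actual history lives in the Kwajalein data):

from datetime import datetime, timedelta, timezone

instant = datetime(1993, 8, 20, 12, 0, tzinfo=timezone.utc)
west = instant.astimezone(timezone(timedelta(hours=-12)))  # old side
east = instant.astimezone(timezone(timedelta(hours=+12)))  # new side
print(west.date(), east.date())  # 1993-08-20 1993-08-21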
# N Mariana Is, Guam
# Howse writes (p 153) ``The Spaniards, on the other hand, reached the
# Philippines and the Ladrones from America,'' and implies that the Ladrones
# (now called the Marianas) kept American date for quite some time.
# Ignore this for now, as we have no hard data. See also Asia/Manila.
# Micronesia
# Alan Eugene Davis <adavis@kuentos.guam.net> writes (1996-03-16),
# ``I am certain, having lived there for the past decade, that "Truk"
# (now properly known as Chuuk) ... is in the time zone GMT+10.''
#
# Shanks writes that Truk switched from GMT+10 to GMT+11 on 1978-10-01;
# ignore this for now.
# From Paul Eggert (1999-10-29):
# The Federated States of Micronesia Visitors Board writes in
# <a href="http://www.fsmgov.org/info/clocks.html">
# The Federated States of Micronesia - Visitor Information
# </a> (1999-01-26)
# that Truk and Yap are GMT+10, and Ponape and Kosrae are GMT+11.
# We don't know when Kosrae switched from GMT+12; assume January 1 for now.
# Samoa
# Howse writes (p 153, citing p 10 of the 1883-11-18 New York Herald)
# that in 1879 the King of Samoa decided to change
# ``the date in his kingdom from the Antipodean to the American system,
# ordaining -- by a masterpiece of diplomatic flattery -- that
# the Fourth of July should be celebrated twice in that year.''
# Tonga
# From Paul Eggert (1996-01-22):
# Today's _Wall Street Journal_ (p 1) reports that ``Tonga has been plotting
# to sneak ahead of [New Zealanders] by introducing daylight-saving time.''
# Since Kiribati has moved the Date Line it's not clear what Tonga will do.
# Don Mundell writes in the 1997-02-20 Tonga Chronicle
# <a href="http://www.tongatapu.net.to/tonga/homeland/timebegins.htm">
# How Tonga became `The Land where Time Begins'
# </a>:
# Until 1941 Tonga maintained a standard time 50 minutes ahead of NZST,
# 12 hours and 20 minutes ahead of GMT. When New Zealand adjusted its
# standard time in the 1940s, Tonga had the choice of subtracting from its
# local time to come on the same standard time as New Zealand or of
# advancing its time to maintain the differential of 13 degrees
# (approximately 50 minutes ahead of New Zealand time).
#
# Because His Majesty King Taufa'ahau Tupou IV, then Crown Prince
# Tungi, preferred to ensure Tonga's title as the land where time
# begins, the Legislative Assembly approved the latter change.
#
# But some of the older, more conservative members from the outer
# islands objected. "If at midnight on Dec. 31, we move ahead 40
# minutes, as your Royal Highness wishes, what becomes of the 40
# minutes we have lost?"
#
# The Crown Prince presented an unanswerable argument: "Remember that
# on the World Day of Prayer, you would be the first people on Earth
# to say your prayers in the morning."
# From Paul Eggert (1999-08-12):
# Shanks says the transition was on 1968-10-01; go with Mundell.
# From Eric Ulevik (1999-05-03):
# Tonga's director of tourism, who is also secretary of the National Millennium
# Committee, has a plan to get Tonga back in front.
# He has proposed a one-off move to tropical daylight saving for Tonga from
# October to March, which has won approval in principle from the Tongan
# Government.
# From Steffen Thorsen [straen@thorsen.priv.no] (1999-09-09):
# * Tonga will introduce DST in November
#
# I was given this link by John Letts <johnletts@earthlink.net>:
# <a href="http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm">
# http://news.bbc.co.uk/hi/english/world/asia-pacific/newsid_424000/424764.stm
# </a>
#
# I have not been able to find exact dates for the transition in November
# yet. By reading this article it seems like Fiji will be 14 hours ahead
# of UTC as well, but as far as I know Fiji will only be 13 hours ahead
# (12 + 1 hour DST).
# From Arthur David Olson [arthur_david_olson@nih.gov] (1999-09-20):
# According to <a href="http://www.tongaonline.com/news/sept1799.html">
# http://www.tongaonline.com/news/sept1799.html
# </a>:
# "Daylight Savings Time will take effect on Oct. 2 through April 15, 2000
# and annually thereafter from the first Saturday in October through the
# third Saturday of April. Under the system approved by Privy Council on
# Sept. 10, clocks must be turned ahead one hour on the opening day and
# set back an hour on the closing date."
# Alas, no indication of the time of day.
# From Rives McDow (1999-10-06):
# Tonga started its Daylight Saving on Saturday morning, October 2nd, at 0200.
# Daylight Saving ends on April 16 at 0300, which is a Sunday morning.
|
169,110,414,052,305 | # @(#)backward 7.15
# This file provides links between current names for time zones
# and their old names. Many names changed in late 1993.
Link America/Adak America/Atka
Link America/Indianapolis America/Fort_Wayne
Link America/Indiana/Knox America/Knox_IN
Link America/St_Thomas America/Virgin
Link Asia/Jerusalem Asia/Tel_Aviv
Link Australia/Sydney Australia/ACT
Link Australia/Sydney Australia/Canberra
Link Australia/Lord_Howe Australia/LHI
Link Australia/Sydney Australia/NSW
Link Australia/Darwin Australia/North
Link Australia/Brisbane Australia/Queensland
Link Australia/Adelaide Australia/South
Link Australia/Hobart Australia/Tasmania
Link Australia/Melbourne Australia/Victoria
Link Australia/Perth Australia/West
Link Australia/Broken_Hill Australia/Yancowinna
Link America/Porto_Acre Brazil/Acre
Link America/Noronha Brazil/DeNoronha
Link America/Sao_Paulo Brazil/East
Link America/Manaus Brazil/West
Link America/Halifax Canada/Atlantic
Link America/Winnipeg Canada/Central
Link America/Regina Canada/East-Saskatchewan
Link America/Montreal Canada/Eastern
Link America/Edmonton Canada/Mountain
Link America/St_Johns Canada/Newfoundland
Link America/Vancouver Canada/Pacific
Link America/Regina Canada/Saskatchewan
Link America/Whitehorse Canada/Yukon
Link America/Santiago Chile/Continental
Link Pacific/Easter Chile/EasterIsland
Link America/Havana Cuba
Link Africa/Cairo Egypt
Link Europe/Dublin Eire
Link Europe/London GB
Link Etc/GMT+0 GMT+0
Link Etc/GMT-0 GMT-0
Link Etc/GMT0 GMT0
Link Etc/Greenwich Greenwich
Link Asia/Hong_Kong Hongkong
Link Atlantic/Reykjavik Iceland
Link Asia/Tehran Iran
Link Asia/Jerusalem Israel
Link America/Jamaica Jamaica
Link Asia/Tokyo Japan
Link Pacific/Kwajalein Kwajalein
Link Africa/Tripoli Libya
Link America/Tijuana Mexico/BajaNorte
Link America/Mazatlan Mexico/BajaSur
Link America/Mexico_City Mexico/General
Link America/Denver Navajo
Link Pacific/Auckland NZ
Link Pacific/Chatham NZ-CHAT
Link Pacific/Pago_Pago Pacific/Samoa
Link Asia/Shanghai PRC
Link Europe/Warsaw Poland
Link Europe/Lisbon Portugal
Link Asia/Taipei ROC
Link Asia/Seoul ROK
Link Asia/Singapore Singapore
Link Europe/Istanbul Turkey
Link Etc/UCT UCT
Link America/Anchorage US/Alaska
Link America/Adak US/Aleutian
Link America/Phoenix US/Arizona
Link America/Chicago US/Central
Link America/Indianapolis US/East-Indiana
Link America/New_York US/Eastern
Link Pacific/Honolulu US/Hawaii
Link America/Indiana/Knox US/Indiana-Starke
Link America/Detroit US/Michigan
Link America/Denver US/Mountain
Link America/Los_Angeles US/Pacific
Link Pacific/Pago_Pago US/Samoa
Link Etc/UTC UTC
Link Etc/Universal Universal
Link Europe/Moscow W-SU
Link Etc/Zulu Zulu
| # @(#)backward 7.16
# This file provides links between current names for time zones
# and their old names. Many names changed in late 1993.
Link America/Adak America/Atka
Link America/Tijuana America/Ensenada
Link America/Indianapolis America/Fort_Wayne
Link America/Indiana/Knox America/Knox_IN
Link America/St_Thomas America/Virgin
Link Asia/Jerusalem Asia/Tel_Aviv
Link Australia/Sydney Australia/ACT
Link Australia/Sydney Australia/Canberra
Link Australia/Lord_Howe Australia/LHI
Link Australia/Sydney Australia/NSW
Link Australia/Darwin Australia/North
Link Australia/Brisbane Australia/Queensland
Link Australia/Adelaide Australia/South
Link Australia/Hobart Australia/Tasmania
Link Australia/Melbourne Australia/Victoria
Link Australia/Perth Australia/West
Link Australia/Broken_Hill Australia/Yancowinna
Link America/Porto_Acre Brazil/Acre
Link America/Noronha Brazil/DeNoronha
Link America/Sao_Paulo Brazil/East
Link America/Manaus Brazil/West
Link America/Halifax Canada/Atlantic
Link America/Winnipeg Canada/Central
Link America/Regina Canada/East-Saskatchewan
Link America/Montreal Canada/Eastern
Link America/Edmonton Canada/Mountain
Link America/St_Johns Canada/Newfoundland
Link America/Vancouver Canada/Pacific
Link America/Regina Canada/Saskatchewan
Link America/Whitehorse Canada/Yukon
Link America/Santiago Chile/Continental
Link Pacific/Easter Chile/EasterIsland
Link America/Havana Cuba
Link Africa/Cairo Egypt
Link Europe/Dublin Eire
Link Europe/London GB
Link Etc/GMT+0 GMT+0
Link Etc/GMT-0 GMT-0
Link Etc/GMT0 GMT0
Link Etc/Greenwich Greenwich
Link Asia/Hong_Kong Hongkong
Link Atlantic/Reykjavik Iceland
Link Asia/Tehran Iran
Link Asia/Jerusalem Israel
Link America/Jamaica Jamaica
Link Asia/Tokyo Japan
Link Pacific/Kwajalein Kwajalein
Link Africa/Tripoli Libya
Link America/Tijuana Mexico/BajaNorte
Link America/Mazatlan Mexico/BajaSur
Link America/Mexico_City Mexico/General
Link America/Denver Navajo
Link Pacific/Auckland NZ
Link Pacific/Chatham NZ-CHAT
Link Pacific/Pago_Pago Pacific/Samoa
Link Asia/Shanghai PRC
Link Europe/Warsaw Poland
Link Europe/Lisbon Portugal
Link Asia/Taipei ROC
Link Asia/Seoul ROK
Link Asia/Singapore Singapore
Link Europe/Istanbul Turkey
Link Etc/UCT UCT
Link America/Anchorage US/Alaska
Link America/Adak US/Aleutian
Link America/Phoenix US/Arizona
Link America/Chicago US/Central
Link America/Indianapolis US/East-Indiana
Link America/New_York US/Eastern
Link Pacific/Honolulu US/Hawaii
Link America/Indiana/Knox US/Indiana-Starke
Link America/Detroit US/Michigan
Link America/Denver US/Mountain
Link America/Los_Angeles US/Pacific
Link Pacific/Pago_Pago US/Samoa
Link Etc/UTC UTC
Link Etc/Universal Universal
Link Europe/Moscow W-SU
Link Etc/Zulu Zulu
|
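# Each "Link TARGET LINK-NAME" line above aliases an old zone name to
# its current one. A small sketch of how a consumer might load the
# mapping (hypothetical helper, not part of the tz tools):

def load_links(path):
    alias = {}
    with open(path) as f:
        for line in f:
            fields = line.split()
            if fields[:1] == ["Link"]:
                alias[fields[2]] = fields[1]  # old name -> current name
    return alias

# e.g. load_links("backward")["US/Pacific"] == "America/Los_Angeles"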
253,536,374,518,669 | # Check tz tables for consistency.
# @(#)checktab.awk 1.5
# Contributed by Paul Eggert <eggert@twinsun.com>.
BEGIN {
FS = "\t"
if (!iso_table) iso_table = "iso3166.tab"
if (!zone_table) zone_table = "zone.tab"
if (!want_warnings) want_warnings = -1
while (getline <iso_table) {
iso_NR++
if ($0 ~ /^#/) continue
if (NF != 2) {
printf "%s:%d: wrong number of columns\n", \
iso_table, iso_NR >>"/dev/stderr"
status = 1
}
cc = $1
name = $2
if (cc !~ /^[A-Z][A-Z]$/) {
printf "%s:%d: invalid country code `%s'\n", \
iso_table, iso_NR, cc >>"/dev/stderr"
status = 1
}
if (cc <= cc0) {
if (cc == cc0) {
s = "duplicate";
} else {
s = "out of order";
}
printf "%s:%d: country code `%s' is %s\n", \
iso_table, iso_NR, cc, s \
>>"/dev/stderr"
status = 1
}
cc0 = cc
if (name2cc[name]) {
printf "%s:%d: `%s' and `%s' have the same name\n", \
iso_table, iso_NR, name2cc[name], cc \
>>"/dev/stderr"
status = 1
}
name2cc[name] = cc
cc2name[cc] = name
cc2NR[cc] = iso_NR
}
zone_table = "zone.tab"
cc0 = ""
while (getline <zone_table) {
zone_NR++
if ($0 ~ /^#/) continue
if (NF != 3 && NF != 4) {
printf "%s:%d: wrong number of columns\n", \
zone_table, zone_NR >>"/dev/stderr"
status = 1
}
cc = $1
coordinates = $2
tz = $3
comments = $4
if (cc < cc0) {
printf "%s:%d: country code `%s' is out of order\n", \
zone_table, zone_NR, cc >>"/dev/stderr"
status = 1
}
cc0 = cc
if (tz2cc[tz]) {
printf "%s:%d: %s: duplicate TZ column\n", \
zone_table, zone_NR, tz >>"/dev/stderr"
status = 1
}
tz2cc[tz] = cc
tz2comments[tz] = comments
tz2NR[tz] = zone_NR
if (cc2name[cc]) {
cc_used[cc]++
} else {
printf "%s:%d: %s: unknown country code\n", \
zone_table, zone_NR, cc >>"/dev/stderr"
status = 1
}
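# Coordinates use ISO 6709: +-DDMM+-DDDMM, or +-DDMMSS+-DDDMMSS with
# seconds; signed latitude precedes longitude.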
if (coordinates !~ /^[-+][0-9][0-9][0-5][0-9][-+][01][0-9][0-9][0-5][0-9]$/ \
&& coordinates !~ /^[-+][0-9][0-9][0-5][0-9][0-5][0-9][-+][01][0-9][0-9][0-5][0-9][0-5][0-9]$/) {
printf "%s:%d: %s: invalid coordinates\n", \
zone_table, zone_NR, coordinates >>"/dev/stderr"
status = 1
}
}
for (tz in tz2cc) {
if (cc_used[tz2cc[tz]] == 1) {
if (tz2comments[tz]) {
printf "%s:%d: unnecessary comment `%s'\n", \
zone_table, tz2NR[tz], tz2comments[tz] \
>>"/dev/stderr"
status = 1
}
} else {
if (!tz2comments[tz]) {
printf "%s:%d: missing comment\n", \
zone_table, tz2NR[tz] >>"/dev/stderr"
status = 1
}
}
}
FS = " "
}
{
tz = ""
if ($1 == "Zone") tz = $2
if ($1 == "Link") {
# Ignore Link commands if source and destination basenames
# are identical, e.g. Europe/Istanbul versus Asia/Istanbul.
src = $2
dst = $3
while ((i = index(src, "/"))) src = substr(src, i+1)
while ((i = index(dst, "/"))) dst = substr(dst, i+1)
if (src != dst) tz = $3
}
if (tz && tz ~ /\//) {
if (!tz2cc[tz]) {
printf "%s: no data for `%s'\n", zone_table, tz \
>>"/dev/stderr"
status = 1
}
zoneSeen[tz] = 1
}
}
END {
for (tz in tz2cc) {
if (!zoneSeen[tz]) {
printf "%s:%d: no Zone table for `%s'\n", \
zone_table, tz2NR[tz], tz >>"/dev/stderr"
status = 1
}
}
if (0 < want_warnings) {
for (cc in cc2name) {
if (!cc_used[cc]) {
printf "%s:%d: warning:" \
"no Zone entries for %s (%s)\n", \
iso_table, cc2NR[cc], cc, cc2name[cc]
}
}
}
exit status
}
| # Check tz tables for consistency.
# @(#)checktab.awk 1.6
# Contributed by Paul Eggert <eggert@twinsun.com>.
BEGIN {
FS = "\t"
if (!iso_table) iso_table = "iso3166.tab"
if (!zone_table) zone_table = "zone.tab"
if (!want_warnings) want_warnings = -1
while (getline <iso_table) {
iso_NR++
if ($0 ~ /^#/) continue
if (NF != 2) {
printf "%s:%d: wrong number of columns\n", \
iso_table, iso_NR >>"/dev/stderr"
status = 1
}
cc = $1
name = $2
if (cc !~ /^[A-Z][A-Z]$/) {
printf "%s:%d: invalid country code `%s'\n", \
iso_table, iso_NR, cc >>"/dev/stderr"
status = 1
}
if (cc <= cc0) {
if (cc == cc0) {
s = "duplicate";
} else {
s = "out of order";
}
printf "%s:%d: country code `%s' is %s\n", \
iso_table, iso_NR, cc, s \
>>"/dev/stderr"
status = 1
}
cc0 = cc
if (name2cc[name]) {
printf "%s:%d: `%s' and `%s' have the same name\n", \
iso_table, iso_NR, name2cc[name], cc \
>>"/dev/stderr"
status = 1
}
name2cc[name] = cc
cc2name[cc] = name
cc2NR[cc] = iso_NR
}
zone_table = "zone.tab"
cc0 = ""
while (getline <zone_table) {
zone_NR++
if ($0 ~ /^#/) continue
if (NF != 3 && NF != 4) {
printf "%s:%d: wrong number of columns\n", \
zone_table, zone_NR >>"/dev/stderr"
status = 1
}
cc = $1
coordinates = $2
tz = $3
comments = $4
if (cc < cc0) {
printf "%s:%d: country code `%s' is out of order\n", \
zone_table, zone_NR, cc >>"/dev/stderr"
status = 1
}
cc0 = cc
if (tz2cc[tz]) {
printf "%s:%d: %s: duplicate TZ column\n", \
zone_table, zone_NR, tz >>"/dev/stderr"
status = 1
}
tz2cc[tz] = cc
tz2comments[tz] = comments
tz2NR[tz] = zone_NR
if (cc2name[cc]) {
cc_used[cc]++
} else {
printf "%s:%d: %s: unknown country code\n", \
zone_table, zone_NR, cc >>"/dev/stderr"
status = 1
}
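# Coordinates use ISO 6709: +-DDMM+-DDDMM, or +-DDMMSS+-DDDMMSS with
# seconds; signed latitude precedes longitude.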
if (coordinates !~ /^[-+][0-9][0-9][0-5][0-9][-+][01][0-9][0-9][0-5][0-9]$/ \
&& coordinates !~ /^[-+][0-9][0-9][0-5][0-9][0-5][0-9][-+][01][0-9][0-9][0-5][0-9][0-5][0-9]$/) {
printf "%s:%d: %s: invalid coordinates\n", \
zone_table, zone_NR, coordinates >>"/dev/stderr"
status = 1
}
}
for (tz in tz2cc) {
if (cc_used[tz2cc[tz]] == 1) {
if (tz2comments[tz]) {
printf "%s:%d: unnecessary comment `%s'\n", \
zone_table, tz2NR[tz], tz2comments[tz] \
>>"/dev/stderr"
status = 1
}
} else {
if (!tz2comments[tz]) {
printf "%s:%d: missing comment\n", \
zone_table, tz2NR[tz] >>"/dev/stderr"
status = 1
}
}
}
FS = " "
}
{
tz = ""
if ($1 == "Zone") tz = $2
if ($1 == "Link") {
# Ignore Link commands if source and destination basenames
# are identical, e.g. Europe/Istanbul versus Asia/Istanbul.
src = $2
dst = $3
while ((i = index(src, "/"))) src = substr(src, i+1)
while ((i = index(dst, "/"))) dst = substr(dst, i+1)
if (src != dst) tz = $3
}
if (tz && tz ~ /\//) {
if (!tz2cc[tz]) {
printf "%s: no data for `%s'\n", zone_table, tz \
>>"/dev/stderr"
status = 1
}
zoneSeen[tz] = 1
}
}
END {
for (tz in tz2cc) {
if (!zoneSeen[tz]) {
printf "%s:%d: no Zone table for `%s'\n", \
zone_table, tz2NR[tz], tz >>"/dev/stderr"
status = 1
}
}
if (0 < want_warnings) {
for (cc in cc2name) {
if (!cc_used[cc]) {
printf "%s:%d: warning: " \
"no Zone entries for %s (%s)\n", \
iso_table, cc2NR[cc], cc, cc2name[cc]
}
}
}
exit status
}
|
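# checktab.awk reads iso3166.tab and zone.tab up front (in BEGIN), then
# scans the zone source files passed as awk input for Zone and Link
# lines. Its coordinate test accepts two fixed-width ISO 6709 shapes;
# an equivalent pattern in Python, mirroring the awk regexps above
# (a reference sketch, not a drop-in replacement):

import re

COORD = re.compile(
    r"^[-+]\d{2}[0-5]\d[-+][01]\d{2}[0-5]\d$"                 # +-DDMM+-DDDMM
    r"|^[-+]\d{2}[0-5]\d[0-5]\d[-+][01]\d{2}[0-5]\d[0-5]\d$"  # with seconds
)

print(bool(COORD.match("-3652+17446")))      # True: degrees and minutes
print(bool(COORD.match("-365212+1744612")))  # True: with seconds
print(bool(COORD.match("-36.5+174.7")))      # False: decimal form rejected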
16,939,242,052,126 | # @(#)southamerica 7.31
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-07-07):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (4th edition),
# San Diego: ACS Publications, Inc. (1995).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Earlier editions of these tables used the North American style (e.g. ARST and
# ARDT for Argentine Standard and Daylight Time), but the following quote
# suggests that it's better to use European style (e.g. ART and ARST).
# I suggest the use of _Summer time_ instead of the more cumbersome
# _daylight-saving time_. _Summer time_ seems to be in general use
# in Europe and South America.
# -- E O Cutler, _New York Times_ (1937-02-14), quoted in
# H L Mencken, _The American Language: Supplement I_ (1960), p 466
#
# Earlier editions of these tables also used the North American style
# for time zones in Brazil, but this was incorrect, as Brazilians say
# "summer time". Reinaldo Goulart, a Sao Paulo businessman active in
# the railroad sector, writes (1999-07-06):
# The subject of time zones is currently a matter of discussion/debate in
# Brazil. Let's say that "the Brasilia time" is considered the
# "official time" because Brasilia is the capital city.
# The other three time zones are called "Brasilia time "minus one" or
# "plus one" or "plus two". As far as I know there is no such
# name/designation as "Eastern Time" or "Central Time".
# So I invented the following (English-language) abbreviations for now.
# Corrections are welcome!
# std dst
# -2:00 FNT FNST Fernando de Noronha
# -3:00 BRT BRST Brasilia
# -4:00 AMT AMST Amazon
# -5:00 ACT ACST Acre
###############################################################################
###############################################################################
# Argentina
# From Bob Devine (1988-01-28):
# Argentina: first Sunday in October to first Sunday in April since 1976.
# Double Summer time from 1969 to 1974. Switches at midnight.
# From U. S. Naval Observatory (1989-01-19):
# ARGENTINA 3 H BEHIND UTC
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# I am sending modifications to the Argentine time zone table...
# AR was chosen because they are the ISO letters that represent Argentina.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Arg 1930 only - Dec 1 0:00 1:00 S
Rule Arg 1931 only - Apr 1 0:00 0 -
Rule Arg 1931 only - Oct 15 0:00 1:00 S
Rule Arg 1932 1940 - Mar 1 0:00 0 -
Rule Arg 1932 1939 - Nov 1 0:00 1:00 S
Rule Arg 1940 only - Jul 1 0:00 1:00 S
Rule Arg 1941 only - Jun 15 0:00 0 -
Rule Arg 1941 only - Oct 15 0:00 1:00 S
Rule Arg 1943 only - Aug 1 0:00 0 -
Rule Arg 1943 only - Oct 15 0:00 1:00 S
Rule Arg 1946 only - Mar 1 0:00 0 -
Rule Arg 1946 only - Oct 1 0:00 1:00 S
Rule Arg 1963 only - Oct 1 0:00 0 -
Rule Arg 1963 only - Dec 15 0:00 1:00 S
Rule Arg 1964 1966 - Mar 1 0:00 0 -
Rule Arg 1964 1966 - Oct 15 0:00 1:00 S
Rule Arg 1967 only - Apr 1 0:00 0 -
Rule Arg 1967 1968 - Oct Sun<=7 0:00 1:00 S
Rule Arg 1968 1969 - Apr Sun<=7 0:00 0 -
Rule Arg 1974 only - Jan 23 0:00 1:00 S
Rule Arg 1974 only - May 1 0:00 0 -
Rule Arg 1974 1976 - Oct Sun<=7 0:00 1:00 S
Rule Arg 1975 1977 - Apr Sun<=7 0:00 0 -
Rule Arg 1985 only - Nov 2 0:00 1:00 S
Rule Arg 1986 only - Mar 14 0:00 0 -
Rule Arg 1986 1987 - Oct 25 0:00 1:00 S
Rule Arg 1987 only - Feb 13 0:00 0 -
Rule Arg 1988 only - Feb 7 0:00 0 -
Rule Arg 1988 only - Dec 1 0:00 1:00 S
#
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# These corrections were contributed by InterSoft Argentina S.A.,
# obtaining the data from the:
# Talleres de Hidrografia Naval Argentina
# (Argentine Naval Hydrography Institute)
#
# Shanks gives 1989 Mar 16 and stops after 1990 Mar 4; go with Otero.
Rule Arg 1989 1993 - Mar Sun>=1 0:00 0 -
Rule Arg 1989 1992 - Oct Sun>=15 0:00 1:00 S
#
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# From this moment on, the law that mandated the daylight saving
# time corrections was derogated and no more modifications
# to the time zones (for daylight saving) are now made.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Buenos Aires (BA), Distrito Federal (DF), Santa Cruz (SC),
# Tierra del Fuego (TF) & Antartida e Islas
Zone America/Buenos_Aires -3:53:48 - LMT 1894 Nov
-4:16:44 - CMT 1920 May # Cordoba Mean Time
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT
#
# Santa Fe (SF), Entre Rios (ER), Corrientes (CN), Misiones (MN), Chaco (CC),
# Formosa (FM), La Pampa (LP), Chubut (CH)
Zone America/Rosario -4:02:40 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Jul
-3:00 - ART
#
# Cordoba (CB), Santiago del Estero (SE), Salta (SA), Tucuman (TM), La Rioja (LR), San Juan (SJ), San Luis (SL),
# Neuquen (NQ), Rio Negro (RN)
Zone America/Cordoba -4:16:44 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1990 Jul
-3:00 - ART
#
# Jujuy (JY)
Zone America/Jujuy -4:21:12 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Mar 3
-4:00 - WART 1991 Oct 6
-4:00 1:00 WARST 1992 Mar 15
-4:00 - WART 1992 Oct 18
-3:00 - ART
#
# Catamarca (CT)
Zone America/Catamarca -4:23:08 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1990 Jul
-3:00 - ART 1991 Jul
-3:00 Arg AR%sT 1992 Jul
-3:00 - ART
#
# Mendoza (MZ)
Zone America/Mendoza -4:35:16 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Mar 3
-4:00 - WART 1991 Oct 15
-4:00 1:00 WARST 1992 Mar 1
-4:00 - WART 1992 Oct 18
-3:00 - ART
# Aruba
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Aruba -4:40:24 - LMT 1912 Feb 12 # Oranjestad
-4:30 - ANT 1965 # Netherlands Antilles Time
-4:00 - AST
# Bolivia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/La_Paz -4:32:36 - LMT 1890
-4:32:36 - LPMT 1931 Oct 15 # La Paz Mean Time
-4:32:36 1:00 BOST 1932 Mar 21 # Bolivia ST
-4:00 - BOT # Bolivia Time
# Brazil
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# The mayor of Rio recently attempted to change the time zone rules
# just in his city, in order to leave more summer time for the tourist trade.
# The rule change lasted only part of the day;
# the federal government refused to follow the city's rules, and business
# was in chaos, so the mayor backed down that afternoon.
# From IATA SSIM (1996-02):
# _Only_ the following states in BR1 observe DST: Rio Grande do Sul (RS),
# Santa Catarina (SC), Parana (PR), Sao Paulo (SP), Rio de Janeiro (RJ),
# Espirito Santo (ES), Minas Gerais (MG), Bahia (BA), Goias (GO),
# Distrito Federal (DF), Tocantins (TO), Sergipe [SE] and Alagoas [AL].
# [The last three states are new to this issue of the IATA SSIM.]
# From Gwillim Law (1996-10-07):
# Geography, history (Tocantins was part of Goias until 1989), and other
# sources of time zone information lead me to believe that AL, SE, and TO were
# always in BR1, and so the only change was whether or not they observed DST....
# The earliest issue of the SSIM I have is 2/91. Each issue from then until
# 9/95 says that DST is observed only in the ten states I quoted from 9/95,
# along with Mato Grosso (MT) and Mato Grosso do Sul (MS), which are in BR2
# (UTC-4).... The other two time zones given for Brazil are BR3, which is
# UTC-5, no DST, and applies only in the state of Acre (AC); and BR4, which is
# UTC-2, and applies to Fernando de Noronha (formerly FN, but I believe it's
# become part of the state of Pernambuco). The boundary between BR1 and BR2
# has never been clearly stated. They've simply been called East and West.
# However, some conclusions can be drawn from another IATA manual: the Airline
# Coding Directory, which lists close to 400 airports in Brazil. For each
# airport it gives a time zone which is coded to the SSIM. From that
# information, I'm led to conclude that the states of Amapa (AP), Ceara (CE),
# Maranhao (MA), Paraiba (PB), Pernambuco (PE), Piaui (PI), and Rio Grande do
# Norte (RN), and the eastern part of Para (PA) are all in BR1 without DST.
# From Paul Eggert (1996-11-22):
# Let's make the following assumptions:
#
# * All data in Shanks are correct through 1990. In particular,
# Shanks was right when he said Acre stopped observing DST in mid-1988.
# * Areas where Shanks reports DST up to 1990, but the IATA reports no DST
# in 1995, stopped observing DST in mid-1990.
#
# Under these assumptions Brazil needs 7 entries to cover all the distinct
# time zone histories since 1970:
#
# Noronha (UTC-2), Fortaleza (UTC-3), and Manaus (UTC-4) stopped observing DST
# in mid-1990.
# Maceio (UTC-3) stopped observing DST in mid-1990, but started again mid-1995.
# Sao Paulo (UTC-3) and Cuiaba (UTC-4) always observed DST.
# Porto Acre (UTC-5) stopped observing DST in mid-1988.
# From Rodrigo Feher <feher@pobox.com> (1998-01-17):
# Reading "southamerica" file in timezone 7.55 I've found an
# error. Line 193 says "Territory of Acre". It is not a territory anymore
# but a state.
# From Marcos Tadeu (1998-09-27):
# <a href="http://pcdsh01.on.br/verao1.html">
# Brazilian official page
# </a>
#
# From Paul Eggert (1998-09-28):
# The official decrees referenced below are taken from
# <a href="http://pcdsh01.on.br/DecHV.html">
# Decretos sobre o Horario de Verao no Brasil
# </a> (1998-09-25, in Portuguese).
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Decree <a href="http://pcdsh01.on.br/HV20466.htm">20,466</a> (1931-10-01)
# Decree <a href="http://pcdsh01.on.br/HV21896.htm">21,896</a> (1932-01-10)
Rule Brazil 1931 only - Oct 3 11:00 1:00 S
Rule Brazil 1932 1933 - Apr 1 0:00 0 -
Rule Brazil 1932 only - Oct 3 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV23195.htm">23,195</a> (1933-10-10)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV27496.htm">27,496</a> (1949-11-24)
# Decree <a href="http://pcdsh01.on.br/HV27998.htm">27,998</a> (1950-04-13)
Rule Brazil 1949 1952 - Dec 1 0:00 1:00 S
Rule Brazil 1950 only - Apr 16 1:00 0 -
Rule Brazil 1951 1952 - Apr 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV32308.htm">32,308</a> (1953-02-24)
Rule Brazil 1953 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV34724.htm">34,724</a> (1953-11-30)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV52700.htm">52,700</a> (1963-10-18)
# established DST from 1963-10-23 00:00 to 1964-02-29 00:00
# in SP, RJ, GB, MG, ES, due to the prolongation of the drought.
# Decree <a href="http://pcdsh01.on.br/HV53071.htm">53,071</a> (1963-12-03)
# extended the above decree to all of the national territory on 12-09.
Rule Brazil 1963 only - Dec 9 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV53604.htm">53,604</a> (1964-02-25)
# extended summer time by one day to 1964-03-01 00:00 (start of school).
Rule Brazil 1964 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV55639.htm">55,639</a> (1965-01-27)
Rule Brazil 1965 only - Jan 31 0:00 1:00 S
Rule Brazil 1965 only - Mar 31 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV57303.htm">57,303</a> (1965-11-22)
Rule Brazil 1965 only - Dec 1 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV57843.htm">57,843</a> (1966-02-18)
Rule Brazil 1966 1968 - Mar 1 0:00 0 -
Rule Brazil 1966 1967 - Nov 1 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV63429.htm">63,429</a> (1968-10-15)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV91698.htm">91,698</a> (1985-09-27)
Rule Brazil 1985 only - Nov 2 0:00 1:00 S
# Decree 92,310 (1986-01-21)
# Decree 92,463 (1986-03-13)
Rule Brazil 1986 only - Mar 15 0:00 0 -
# Decree 93,316 (1986-10-01)
Rule Brazil 1986 only - Oct 25 0:00 1:00 S
Rule Brazil 1987 only - Feb 14 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV94922.htm">94,922</a> (1987-09-22)
Rule Brazil 1987 only - Oct 25 0:00 1:00 S
Rule Brazil 1988 only - Feb 7 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV96676.htm">96,676</a> (1988-09-12)
# except for the states of AC, AM, PA, RR, RO, and AP (then a territory)
Rule Brazil 1988 only - Oct 16 0:00 1:00 S
Rule Brazil 1989 only - Jan 29 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV98077.htm">98,077</a> (1989-08-21)
# with the same exceptions
Rule Brazil 1989 only - Oct 15 0:00 1:00 S
Rule Brazil 1990 only - Feb 11 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV99530.htm">99,530</a> (1990-09-17)
# adopted by RS, SC, PR, SP, RJ, ES, MG, GO, MS, DF.
# Decree 99,629 (1990-10-19) adds BA, MT.
Rule Brazil 1990 only - Oct 21 0:00 1:00 S
Rule Brazil 1991 only - Feb 17 0:00 0 -
# <a href="http://pcdsh01.on.br/HV1991.htm">Unnumbered decree</a> (1991-09-25)
# adopted by RS, SC, PR, SP, RJ, ES, MG, BA, GO, MT, MS, DF.
Rule Brazil 1991 only - Oct 20 0:00 1:00 S
Rule Brazil 1992 only - Feb 9 0:00 0 -
# <a href="http://pcdsh01.on.br/HV1992.htm">Unnumbered decree</a> (1992-10-16)
# adopted by same states.
Rule Brazil 1992 only - Oct 25 0:00 1:00 S
Rule Brazil 1993 only - Jan 31 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV942.htm">942</a> (1993-09-28)
# adopted by same states, plus AM.
# Decree <a href="http://pcdsh01.on.br/HV1252.htm">1,252</a> (1994-09-22)
# adopted by same states, minus AM.
# Decree <a href="http://pcdsh01.on.br/HV1636.htm">1,636</a> (1995-09-14)
# adopted by same states, plus TO.
# Decree <a href="http://pcdsh01.on.br/HV1674.htm">1,674</a> (1995-10-13)
# adds AL, SE.
Rule Brazil 1993 1995 - Oct Sun>=11 0:00 1:00 S
Rule Brazil 1994 1995 - Feb Sun>=15 0:00 0 -
Rule Brazil 1996 only - Feb 11 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV2000.htm">2,000</a> (1996-09-04)
# adopted by same states, minus AL, SE.
Rule Brazil 1996 only - Oct 6 0:00 1:00 S
Rule Brazil 1997 only - Feb 16 0:00 0 -
# From Daniel C. Sobral <dcs@gns.com.br> (1998-02-12):
# In 1997, the DS began on October 6. The stated reason was that
# because international television networks ignored Brazil's policy on DS,
# they bought the wrong times on satellite for coverage of the Pope's visit.
# This year, the ending date of DS was postponed to March 1
# to help deal with the shortages of electric power.
#
# From Paul Eggert (1998-02-25):
# <a href="http://churchnet.ucsm.ac.uk/news/files2/news165.htm">
# Brazil Prepares for Papal Visit
# </a>,
# Church Net UK (1997-10-02).
#
# Decree 2,317 (1997-09-04), adopted by same states.
Rule Brazil 1997 only - Oct 6 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/figuras/HV2495.JPG">2,495</a>
# (1998-02-10)
Rule Brazil 1998 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/figuras/Hv98.jpg">2,780</a> (1998-09-11)
# adopted by the same states as before.
Rule Brazil 1998 only - Oct 11 0:00 1:00 S
Rule Brazil 1999 only - Feb 21 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/figuras/HV3150.gif">3,150</a>
# (1999-08-23), adopted by same states, says only 1999-10-03 and 2000-02-27;
# after that, these rules are guesses and are quite possibly wrong,
# but they are more likely than no DST at all.
Rule Brazil 1999 max - Oct Sun>=1 0:00 1:00 S
Rule Brazil 2000 max - Feb lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Fernando de Noronha
Zone America/Noronha -2:09:40 - LMT 1914
-2:00 Brazil FN%sT 1990 Jul
-2:00 - FNT
#
# Amapa, east Para
Zone America/Belem -3:13:56 - LMT 1914
-3:00 Brazil BR%sT 1988 Jul
-3:00 - BRT
#
# Maranhao, Piaui, Ceara, Rio Grande do Norte, Paraiba,
# Pernambuco (except Fernando de Noronha)
Zone America/Fortaleza -2:34:00 - LMT 1914
-3:00 Brazil BR%sT 1990 Jul
-3:00 - BRT
#
# Tocantins
Zone America/Araguaina -3:12:48 - LMT 1914
-3:00 Brazil BR%sT 1990 Jul
-3:00 - BRT 1995 Jul
-3:00 Brazil BR%sT
#
# Alagoas, Sergipe
Zone America/Maceio -2:22:52 - LMT 1914
-3:00 Brazil BR%sT 1990 Jul
-3:00 - BRT 1995 Jul
-3:00 Brazil BR%sT 1996 Jul
-3:00 - BRT
#
# Bahia, Goias, Distrito Federal, Minas Gerais, Espirito Santo, Rio de Janeiro,
# Sao Paulo, Parana, Santa Catarina, Rio Grande do Sul
Zone America/Sao_Paulo -3:06:28 - LMT 1914
-3:00 Brazil BR%sT 1963 Oct 23 00:00
-3:00 - BRST 1964
-3:00 Brazil BR%sT
#
# Mato Grosso, Mato Grosso do Sul
Zone America/Cuiaba -3:44:20 - LMT 1914
-4:00 Brazil AM%sT 1990 Jul
-4:00 - AMT
#
# Roraima, west Para, Rondonia
Zone America/Porto_Velho -4:15:36 - LMT 1914
-4:00 Brazil AM%sT 1988 Jul
-4:00 - AMT
#
# Amazonas
Zone America/Manaus -4:00:04 - LMT 1914
-4:00 Brazil AM%sT 1988 Jul
-4:00 - AMT 1993 Jul
-4:00 Brazil AM%sT 1994 Jul
-4:00 - AMT
#
# Acre
# Rio_Branco is too ambiguous, since there's a Rio Branco in Uruguay too.
Zone America/Porto_Acre -4:31:12 - LMT 1914
-5:00 Brazil AC%sT 1988 Jul
-5:00 - ACT
#
# Martin Vaz and Trinidade are like America/Noronha.
# Chile
# From Eduardo Krell (1995-10-19):
# The law says to switch to DST at midnight [24:00] on the second SATURDAY
# of October.... The law is the same for March and October.
# (1998-09-29):
# Because of the drought this year, the government decided to go into
# DST earlier (saturday 9/26 at 24:00). This is a one-time change only ...
# (unless there's another dry season next year, I guess).
# From Julio I. Pacheco Troncoso (1999-03-18):
# Because of the same drought, the government decided to end DST later,
# on April 3, (one-time change).
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Chile 1918 only - Sep 1 0:00 1:00 S
Rule Chile 1919 only - Jul 2 0:00 0 -
Rule Chile 1927 1931 - Sep 1 0:00 1:00 S
Rule Chile 1928 1932 - Apr 1 0:00 0 -
Rule Chile 1969 1997 - Oct Sun>=9 0:00 1:00 S
Rule Chile 1970 1998 - Mar Sun>=9 0:00 0 -
Rule Chile 1998 only - Sep 27 0:00 1:00 S
Rule Chile 1999 only - Apr 4 0:00 0 -
Rule Chile 1999 max - Oct Sun>=9 0:00 1:00 S
Rule Chile 2000 max - Mar Sun>=9 0:00 0 -
# IATA SSIM anomalies: (1990-09) says 1990-09-16; (1992-02) says 1992-03-14;
# (1996-09) says 1998-03-08. Ignore these for now.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Santiago -4:42:40 - LMT 1890
-4:42:40 - SMT 1910 # Santiago Mean Time
-5:00 Chile CL%sT 1932 Sep # Chile Time
-4:00 Chile CL%sT
Zone Pacific/Easter -7:17:28 - LMT 1890 # Mataveri
-7:17:28 - MMT 1932 Sep # Mataveri Mean Time
-7:00 Chile EAS%sT 1982 Mar 14 # Easter I Time
-6:00 Chile EAS%sT
#
# Whitman says Juan Fernandez Is are like America/Santiago.
# San Ambrosio, San Felix
# no information; probably like America/Santiago
# Colombia
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule CO 1992 only - May 2 0:00 1:00 S
# Shanks (1995) estimates 1993-04-03 24:00 for this; go with IATA.
Rule CO 1992 only - Dec 31 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Bogota -4:56:20 - LMT 1884 Mar 13
-4:56:20 - BMT 1914 Nov 23 # Bogota Mean Time
-5:00 CO CO%sT # Colombia Time
# Malpelo, Providencia, San Andres
# no information; probably like America/Bogota
# Curacao
# Shanks (1995) says that Bottom and Oranjestad have been at -4:00 since
# standard time was introduced on 1912-03-02; and that Kralendijk and Rincon
# used Kralendijk Mean Time (-4:33:08) from 1912-02-02 to 1965-01-01.
# This all predates our 1970 cutoff, though.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Curacao -4:35:44 - LMT 1912 Feb 12 # Willemstad
-4:30 - ANT 1965 # Netherlands Antilles Time
-4:00 - AST
# Ecuador
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guayaquil -5:19:20 - LMT 1890
-5:14:00 - QMT 1931 # Quito Mean Time
-5:00 - ECT # Ecuador Time
Zone Pacific/Galapagos -5:58:24 - LMT 1931 # Puerto Baquerizo Moreno
-5:00 - ECT 1986
-6:00 - GALT # Galapagos Time
# Falklands
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Falk 1937 1938 - Sep lastSun 0:00 1:00 S
Rule Falk 1938 1942 - Mar Sun>=19 0:00 0 -
Rule Falk 1939 only - Oct 1 0:00 1:00 S
Rule Falk 1940 1942 - Sep lastSun 0:00 1:00 S
Rule Falk 1943 only - Jan 1 0:00 0 -
Rule Falk 1983 only - Sep lastSun 0:00 1:00 S
Rule Falk 1984 1985 - Apr lastSun 0:00 0 -
Rule Falk 1984 only - Sep 16 0:00 1:00 S
Rule Falk 1985 1995 - Sep Sun>=9 0:00 1:00 S
Rule Falk 1986 max - Apr Sun>=16 0:00 0 -
Rule Falk 1996 max - Sep Sun>=8 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Stanley -3:51:24 - LMT 1890
-3:51:24 - SMT 1912 Mar 12 # Stanley Mean Time
-4:00 Falk FK%sT 1983 May # Falkland Is Time
-3:00 Falk FK%sT 1985 Sep 15
-4:00 Falk FK%sT
# French Guiana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Cayenne -3:29:20 - LMT 1911 Jul
-4:00 - GFT 1967 Oct # French Guiana Time
-3:00 - GFT
# Guyana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guyana -3:52:40 - LMT 1915 Mar # Georgetown
-3:45 - GBGT 1966 May 26 # Br Guiana Time
-3:45 - GYT 1975 Jul 31 # Guyana Time
-3:00 - GYT 1991
# IATA SSIM (1996-06) says -4:00. Assume a 1991 switch.
-4:00 - GYT
# Paraguay
# From Bob Devine (1988-01-28):
# Paraguay: First day in October to last in March. Midnight switch??
# Since 1980.
# From U. S. Naval Observatory (1989-01-19):
# PARAGUAY 4 H BEHIND UTC
# PARAGUAY 3 H BEHIND UTC OCT 1, '88-MAR 31, '89
# From Shanks (1991):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Para 1975 1978 - Oct 1 0:00 1:00 S
Rule Para 1975 1978 - Mar 1 0:00 0 -
# Shanks says 1979 was all DST.
Rule Para 1980 1991 - Apr 1 0:00 0 -
Rule Para 1980 1988 - Oct 1 0:00 1:00 S
Rule Para 1989 only - Oct 22 0:00 1:00 S
Rule Para 1990 only - Oct 1 0:00 1:00 S
Rule Para 1991 only - Oct 6 0:00 1:00 S
Rule Para 1992 only - Mar 1 0:00 0 -
Rule Para 1992 only - Oct 5 0:00 1:00 S
Rule Para 1993 only - Mar 31 0:00 0 -
Rule Para 1993 1995 - Oct 1 0:00 1:00 S
Rule Para 1994 1995 - Feb lastSun 0:00 0 -
Rule Para 1996 1998 - Mar 1 0:00 0 -
Rule Para 1996 max - Oct Sun>=1 0:00 1:00 S
Rule Para 1999 max - Feb lastSat 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Asuncion -3:50:40 - LMT 1890
-3:50:40 - AMT 1931 Oct 10 # Asuncion Mean Time
-4:00 - PYT 1972 Oct # Paraguay Time
-3:00 - PYT 1974 Apr
-4:00 Para PY%sT
# Peru
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Peru 1938 only - Jan 1 0:00 1:00 S
Rule Peru 1938 only - Apr 1 0:00 0 -
Rule Peru 1938 1939 - Sep lastSun 0:00 1:00 S
Rule Peru 1939 1940 - Mar Sun>=24 0:00 0 -
Rule Peru 1987 only - Jan 1 0:00 1:00 S
Rule Peru 1987 only - Apr 1 0:00 0 -
Rule Peru 1990 only - Jan 1 0:00 1:00 S
Rule Peru 1990 only - Apr 1 0:00 0 -
Rule Peru 1993 only - Jan 1 0:00 1:00 S
Rule Peru 1993 only - Apr 1 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Lima -5:08:12 - LMT 1890
-5:09 - LMT 1908 Jul 28 # Lima Mean Time
-5:00 Peru PE%sT # Peru Time
# South Georgia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/South_Georgia -2:26:08 - LMT 1890 # Grytviken
-2:00 - GST # South Georgia Time
# South Sandwich Is
# uninhabited; scientific personnel have wintered
# Suriname
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Paramaribo -3:40:40 - LMT 1911
-3:40:52 - PMT 1935 # Paramaribo Mean Time
-3:40:36 - PMT 1945 Oct # The capital moved?
-3:30 - NEGT 1975 Nov 20 # Dutch Guiana Time
-3:30 - SRT 1984 Oct # Suriname Time
-3:00 - SRT
# Trinidad and Tobago
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Port_of_Spain -4:06:04 - LMT 1912 Mar 2
-4:00 - AST
# Uruguay
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# Uruguay wins the prize for the strangest peacetime manipulation of the rules.
# From Shanks (1991):
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives 1923 Oct 1; go with Shanks.
Rule Uruguay 1923 only - Oct 2 0:00 0:30 HS
Rule Uruguay 1924 1926 - Apr 1 0:00 0 -
Rule Uruguay 1924 1925 - Oct 1 0:00 0:30 HS
Rule Uruguay 1933 1935 - Oct lastSun 0:00 0:30 HS
# Shanks gives 1935 Apr 1 0:00 and 1936 Mar 30 0:00; go with Whitman.
Rule Uruguay 1934 1936 - Mar Sat>=25 23:30s 0 -
Rule Uruguay 1936 only - Nov 1 0:00 0:30 HS
Rule Uruguay 1937 1941 - Mar lastSun 0:00 0 -
# Whitman gives 1937 Oct 3; go with Shanks.
Rule Uruguay 1937 1940 - Oct lastSun 0:00 0:30 HS
# Whitman gives 1941 Oct 24 - 1942 Mar 27, 1942 Dec 14 - 1943 Apr 13,
# and 1943 Apr 13 ``to present time''; go with Shanks.
Rule Uruguay 1941 only - Aug 1 0:00 0 -
Rule Uruguay 1942 only - Jan 1 0:00 0:30 HS
Rule Uruguay 1942 only - Dec 14 0:00 1:00 S
Rule Uruguay 1943 only - Mar 14 0:00 0 -
Rule Uruguay 1959 only - May 24 0:00 1:00 S
Rule Uruguay 1959 only - Nov 15 0:00 0 -
Rule Uruguay 1960 only - Jan 17 0:00 1:00 S
Rule Uruguay 1960 only - Mar 6 0:00 0 -
Rule Uruguay 1965 1967 - Apr Sun>=1 0:00 1:00 S
Rule Uruguay 1965 only - Sep 26 0:00 0 -
Rule Uruguay 1966 1967 - Oct 31 0:00 0 -
Rule Uruguay 1968 1970 - May 27 0:00 0:30 HS
Rule Uruguay 1968 1970 - Dec 2 0:00 0 -
Rule Uruguay 1972 only - Apr 24 0:00 1:00 S
Rule Uruguay 1972 only - Aug 15 0:00 0 -
Rule Uruguay 1974 only - Mar 10 0:00 0:30 HS
Rule Uruguay 1974 only - Dec 22 0:00 1:00 S
Rule Uruguay 1976 only - Oct 1 0:00 0 -
Rule Uruguay 1977 only - Dec 4 0:00 1:00 S
Rule Uruguay 1978 only - Apr 1 0:00 0 -
Rule Uruguay 1979 only - Oct 1 0:00 1:00 S
Rule Uruguay 1980 only - May 1 0:00 0 -
Rule Uruguay 1987 only - Dec 14 0:00 1:00 S
Rule Uruguay 1988 only - Mar 14 0:00 0 -
Rule Uruguay 1988 only - Dec 11 0:00 1:00 S
Rule Uruguay 1989 only - Mar 12 0:00 0 -
Rule Uruguay 1989 only - Oct 29 0:00 1:00 S
Rule Uruguay 1990 1992 - Mar Sun>=1 0:00 0 -
Rule Uruguay 1990 1991 - Oct Sun>=21 0:00 1:00 S
# Shanks's 4th edition (1995) says no DST was observed in 1990/1 and 1991/2,
# and that 1992/3's DST was from 10-25 to 03-01. Go with IATA.
Rule Uruguay 1992 only - Oct 18 0:00 1:00 S
Rule Uruguay 1993 only - Feb 28 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28
-3:44:44 - MMT 1920 May 1 # Montevideo MT
-3:30 Uruguay UY%sT 1942 Dec 14 # Uruguay Time
-3:00 Uruguay UY%sT
# Venezuela
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Caracas -4:27:44 - LMT 1890
-4:27:44 - CMT 1912 Feb 12 # Caracas Mean Time
-4:30 - VET 1965 # Venezuela Time
-4:00 - VET
| # @(#)southamerica 7.32
# This data is by no means authoritative; if you think you know better,
# go ahead and edit the file (and please send any changes to
# tz@elsie.nci.nih.gov for general use in the future).
# From Paul Eggert <eggert@twinsun.com> (1999-07-07):
# A good source for time zone historical data outside the U.S. is
# Thomas G. Shanks, The International Atlas (5th edition),
# San Diego: ACS Publications, Inc. (1999).
#
# Gwillim Law <LAW@encmail.encompass.com> writes that a good source
# for recent time zone data is the International Air Transport
# Association's Standard Schedules Information Manual (IATA SSIM),
# published semiannually. Law sent in several helpful summaries
# of the IATA's data after 1990.
#
# Except where otherwise noted, Shanks is the source for entries through 1990,
# and IATA SSIM is the source for entries after 1990.
#
# Earlier editions of these tables used the North American style (e.g. ARST and
# ARDT for Argentine Standard and Daylight Time), but the following quote
# suggests that it's better to use European style (e.g. ART and ARST).
# I suggest the use of _Summer time_ instead of the more cumbersome
# _daylight-saving time_. _Summer time_ seems to be in general use
# in Europe and South America.
# -- E O Cutler, _New York Times_ (1937-02-14), quoted in
# H L Mencken, _The American Language: Supplement I_ (1960), p 466
#
# Earlier editions of these tables also used the North American style
# for time zones in Brazil, but this was incorrect, as Brazilians say
# "summer time". Reinaldo Goulart, a Sao Paulo businessman active in
# the railroad sector, writes (1999-07-06):
# The subject of time zones is currently a matter of discussion/debate in
# Brazil. Let's say that "the Brasilia time" is considered the
# "official time" because Brasilia is the capital city.
# The other three time zones are called "Brasilia time minus one" or
# "plus one" or "plus two". As far as I know there is no such
# name/designation as "Eastern Time" or "Central Time".
# So I invented the following (English-language) abbreviations for now.
# Corrections are welcome!
# std dst
# -2:00 FNT FNST Fernando de Noronha
# -3:00 BRT BRST Brasilia
# -4:00 AMT AMST Amazon
# -5:00 ACT ACST Acre
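#
# As a brief aside on mechanics: zic builds each abbreviation by
# substituting the LETTER/S field of the rule in effect into the %s of
# the zone's FORMAT column ("-" means no letter). A minimal Python
# sketch of that substitution (illustrative only; the helper name is
# made up and is not part of any tz tool):

def abbreviation(fmt, letter):
    # LETTER/S is "-" when nothing is substituted for %s.
    return fmt.replace("%s", "" if letter == "-" else letter)

assert abbreviation("BR%sT", "-") == "BRT"   # standard time
assert abbreviation("BR%sT", "S") == "BRST"  # summer time
#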
###############################################################################
###############################################################################
# Argentina
# From Bob Devine (1988-01-28):
# Argentina: first Sunday in October to first Sunday in April since 1976.
# Double Summer time from 1969 to 1974. Switches at midnight.
# From U. S. Naval Observatory (1989-01-19):
# ARGENTINA 3 H BEHIND UTC
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# I am sending modifications to the Argentine time zone table...
# AR was chosen because they are the ISO letters that represent Argentina.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Arg 1930 only - Dec 1 0:00 1:00 S
Rule Arg 1931 only - Apr 1 0:00 0 -
Rule Arg 1931 only - Oct 15 0:00 1:00 S
Rule Arg 1932 1940 - Mar 1 0:00 0 -
Rule Arg 1932 1939 - Nov 1 0:00 1:00 S
Rule Arg 1940 only - Jul 1 0:00 1:00 S
Rule Arg 1941 only - Jun 15 0:00 0 -
Rule Arg 1941 only - Oct 15 0:00 1:00 S
Rule Arg 1943 only - Aug 1 0:00 0 -
Rule Arg 1943 only - Oct 15 0:00 1:00 S
Rule Arg 1946 only - Mar 1 0:00 0 -
Rule Arg 1946 only - Oct 1 0:00 1:00 S
Rule Arg 1963 only - Oct 1 0:00 0 -
Rule Arg 1963 only - Dec 15 0:00 1:00 S
Rule Arg 1964 1966 - Mar 1 0:00 0 -
Rule Arg 1964 1966 - Oct 15 0:00 1:00 S
Rule Arg 1967 only - Apr 1 0:00 0 -
Rule Arg 1967 1968 - Oct Sun<=7 0:00 1:00 S
Rule Arg 1968 1969 - Apr Sun<=7 0:00 0 -
Rule Arg 1974 only - Jan 23 0:00 1:00 S
Rule Arg 1974 only - May 1 0:00 0 -
Rule Arg 1974 1976 - Oct Sun<=7 0:00 1:00 S
Rule Arg 1975 1977 - Apr Sun<=7 0:00 0 -
Rule Arg 1985 only - Nov 2 0:00 1:00 S
Rule Arg 1986 only - Mar 14 0:00 0 -
Rule Arg 1986 1987 - Oct 25 0:00 1:00 S
Rule Arg 1987 only - Feb 13 0:00 0 -
Rule Arg 1988 only - Feb 7 0:00 0 -
Rule Arg 1988 only - Dec 1 0:00 1:00 S
#
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# These corrections were contributed by InterSoft Argentina S.A.,
# obtaining the data from the:
# Talleres de Hidrografia Naval Argentina
# (Argentine Naval Hydrography Institute)
#
# Shanks stops after 1992-03-01; go with Otero.
Rule Arg 1989 1993 - Mar Sun>=1 0:00 0 -
Rule Arg 1989 1992 - Oct Sun>=15 0:00 1:00 S
#
# From Hernan G. Otero <hernan@isoft.com.ar> (1995-06-26):
# From this moment on, the law that mandated the daylight saving
# time corrections was derogated and no more modifications
# to the time zones (for daylight saving) are now made.
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Buenos Aires (BA), Distrito Federal (DF), Santa Cruz (SC),
# Tierra del Fuego (TF) & Antartida e Islas
Zone America/Buenos_Aires -3:53:48 - LMT 1894 Nov
-4:16:44 - CMT 1920 May # Cordoba Mean Time
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT
#
# Santa Fe (SF), Entre Rios (ER), Corrientes (CN), Misiones (MN), Chaco (CC),
# Formosa (FM), La Pampa (LP), Chubut (CH)
Zone America/Rosario -4:02:40 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Jul
-3:00 - ART
#
# Cordoba (CB), Santiago del Estero (SE), Salta (SA), Tucuman (TM), La Rioja (LR), San Juan (SJ), San Luis (SL),
# Neuquen (NQ), Rio Negro (RN)
Zone America/Cordoba -4:16:44 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1990 Jul
-3:00 - ART
#
# Jujuy (JY)
Zone America/Jujuy -4:21:12 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Mar 3
-4:00 - WART 1991 Oct 6
-4:00 1:00 WARST 1992 Mar 15
-4:00 - WART 1992 Oct 18
-3:00 - ART
#
# Catamarca (CT)
Zone America/Catamarca -4:23:08 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1990 Jul
-3:00 - ART 1991 Jul
-3:00 Arg AR%sT 1992 Jul
-3:00 - ART
#
# Mendoza (MZ)
Zone America/Mendoza -4:35:16 - LMT 1894 Nov
-4:16:44 - CMT 1920 May
-4:00 - ART 1930 Dec
-4:00 Arg AR%sT 1969 Oct 5
-3:00 Arg AR%sT 1991 Mar 3
-4:00 - WART 1991 Oct 15
-4:00 1:00 WARST 1992 Mar 1
-4:00 - WART 1992 Oct 18
-3:00 - ART
# Aruba
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Aruba -4:40:24 - LMT 1912 Feb 12 # Oranjestad
-4:30 - ANT 1965 # Netherlands Antilles Time
-4:00 - AST
# Bolivia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/La_Paz -4:32:36 - LMT 1890
-4:32:36 - LPMT 1931 Oct 15 # La Paz Mean Time
-4:32:36 1:00 BOST 1932 Mar 21 # Bolivia ST
-4:00 - BOT # Bolivia Time
# Brazil
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# The mayor of Rio recently attempted to change the time zone rules
# just in his city, in order to leave more summer time for the tourist trade.
# The rule change lasted only part of the day;
# the federal government refused to follow the city's rules, and business
# was in chaos, so the mayor backed down that afternoon.
# From IATA SSIM (1996-02):
# _Only_ the following states in BR1 observe DST: Rio Grande do Sul (RS),
# Santa Catarina (SC), Parana (PR), Sao Paulo (SP), Rio de Janeiro (RJ),
# Espirito Santo (ES), Minas Gerais (MG), Bahia (BA), Goias (GO),
# Distrito Federal (DF), Tocantins (TO), Sergipe [SE] and Alagoas [AL].
# [The last three states are new to this issue of the IATA SSIM.]
# From Gwillim Law (1996-10-07):
# Geography, history (Tocantins was part of Goias until 1989), and other
# sources of time zone information lead me to believe that AL, SE, and TO were
# always in BR1, and so the only change was whether or not they observed DST....
# The earliest issue of the SSIM I have is 2/91. Each issue from then until
# 9/95 says that DST is observed only in the ten states I quoted from 9/95,
# along with Mato Grosso (MT) and Mato Grosso do Sul (MS), which are in BR2
# (UTC-4).... The other two time zones given for Brazil are BR3, which is
# UTC-5, no DST, and applies only in the state of Acre (AC); and BR4, which is
# UTC-2, and applies to Fernando de Noronha (formerly FN, but I believe it's
# become part of the state of Pernambuco). The boundary between BR1 and BR2
# has never been clearly stated. They've simply been called East and West.
# However, some conclusions can be drawn from another IATA manual: the Airline
# Coding Directory, which lists close to 400 airports in Brazil. For each
# airport it gives a time zone which is coded to the SSIM. From that
# information, I'm led to conclude that the states of Amapa (AP), Ceara (CE),
# Maranhao (MA), Paraiba (PB), Pernambuco (PE), Piaui (PI), and Rio Grande do
# Norte (RN), and the eastern part of Para (PA) are all in BR1 without DST.
# From Marcos Tadeu (1998-09-27):
# <a href="http://pcdsh01.on.br/verao1.html">
# Brazilian official page
# </a>
#
# From Paul Eggert (1998-10-29):
# The official decrees referenced below are taken from
# <a href="http://pcdsh01.on.br/DecHV.html">
# Decretos sobre o Horario de Verao no Brasil
# </a> (1999-10-04, in Portuguese).
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Decree <a href="http://pcdsh01.on.br/HV20466.htm">20,466</a> (1931-10-01)
# Decree <a href="http://pcdsh01.on.br/HV21896.htm">21,896</a> (1932-01-10)
Rule Brazil 1931 only - Oct 3 11:00 1:00 S
Rule Brazil 1932 1933 - Apr 1 0:00 0 -
Rule Brazil 1932 only - Oct 3 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV23195.htm">23,195</a> (1933-10-10)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV27496.htm">27,496</a> (1949-11-24)
# Decree <a href="http://pcdsh01.on.br/HV27998.htm">27,998</a> (1950-04-13)
Rule Brazil 1949 1952 - Dec 1 0:00 1:00 S
Rule Brazil 1950 only - Apr 16 1:00 0 -
Rule Brazil 1951 1952 - Apr 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV32308.htm">32,308</a> (1953-02-24)
Rule Brazil 1953 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV34724.htm">34,724</a> (1953-11-30)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV52700.htm">52,700</a> (1963-10-18)
# established DST from 1963-10-23 00:00 to 1964-02-29 00:00
# in SP, RJ, GB, MG, ES, due to the prolongation of the drought.
# Decree <a href="http://pcdsh01.on.br/HV53071.htm">53,071</a> (1963-12-03)
# extended the above decree to all of the national territory on 12-09.
Rule Brazil 1963 only - Dec 9 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV53604.htm">53,604</a> (1964-02-25)
# extended summer time by one day to 1964-03-01 00:00 (start of school).
Rule Brazil 1964 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV55639.htm">55,639</a> (1965-01-27)
Rule Brazil 1965 only - Jan 31 0:00 1:00 S
Rule Brazil 1965 only - Mar 31 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV57303.htm">57,303</a> (1965-11-22)
Rule Brazil 1965 only - Dec 1 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV57843.htm">57,843</a> (1966-02-18)
Rule Brazil 1966 1968 - Mar 1 0:00 0 -
Rule Brazil 1966 1967 - Nov 1 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/HV63429.htm">63,429</a> (1968-10-15)
# revoked DST.
# Decree <a href="http://pcdsh01.on.br/HV91698.htm">91,698</a> (1985-09-27)
Rule Brazil 1985 only - Nov 2 0:00 1:00 S
# Decree 92,310 (1986-01-21)
# Decree 92,463 (1986-03-13)
Rule Brazil 1986 only - Mar 15 0:00 0 -
# Decree 93,316 (1986-10-01)
Rule Brazil 1986 only - Oct 25 0:00 1:00 S
Rule Brazil 1987 only - Feb 14 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV94922.htm">94,922</a> (1987-09-22)
Rule Brazil 1987 only - Oct 25 0:00 1:00 S
Rule Brazil 1988 only - Feb 7 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV96676.htm">96,676</a> (1988-09-12)
# except for the states of AC, AM, PA, RR, RO, and AP (then a territory)
Rule Brazil 1988 only - Oct 16 0:00 1:00 S
Rule Brazil 1989 only - Jan 29 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV98077.htm">98,077</a> (1989-08-21)
# with the same exceptions
Rule Brazil 1989 only - Oct 15 0:00 1:00 S
Rule Brazil 1990 only - Feb 11 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV99530.htm">99,530</a> (1990-09-17)
# adopted by RS, SC, PR, SP, RJ, ES, MG, GO, MS, DF.
# Decree 99,629 (1990-10-19) adds BA, MT.
Rule Brazil 1990 only - Oct 21 0:00 1:00 S
Rule Brazil 1991 only - Feb 17 0:00 0 -
# <a href="http://pcdsh01.on.br/HV1991.htm">Unnumbered decree</a> (1991-09-25)
# adopted by RS, SC, PR, SP, RJ, ES, MG, BA, GO, MT, MS, DF.
Rule Brazil 1991 only - Oct 20 0:00 1:00 S
Rule Brazil 1992 only - Feb 9 0:00 0 -
# <a href="http://pcdsh01.on.br/HV1992.htm">Unnumbered decree</a> (1992-10-16)
# adopted by same states.
Rule Brazil 1992 only - Oct 25 0:00 1:00 S
Rule Brazil 1993 only - Jan 31 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV942.htm">942</a> (1993-09-28)
# adopted by same states, plus AM.
# Decree <a href="http://pcdsh01.on.br/HV1252.htm">1,252</a> (1994-09-22)
# adopted by same states, minus AM.
# Decree <a href="http://pcdsh01.on.br/HV1636.htm">1,636</a> (1995-09-14)
# adopted by same states, plus TO.
# Decree <a href="http://pcdsh01.on.br/HV1674.htm">1,674</a> (1995-10-13)
# adds AL, SE.
Rule Brazil 1993 1995 - Oct Sun>=11 0:00 1:00 S
Rule Brazil 1994 1995 - Feb Sun>=15 0:00 0 -
Rule Brazil 1996 only - Feb 11 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/HV2000.htm">2,000</a> (1996-09-04)
# adopted by same states, minus AL, SE.
Rule Brazil 1996 only - Oct 6 0:00 1:00 S
Rule Brazil 1997 only - Feb 16 0:00 0 -
# From Daniel C. Sobral <dcs@gns.com.br> (1998-02-12):
# In 1997, the DS began on October 6. The stated reason was that
# because international television networks ignored Brazil's policy on DS,
# they bought the wrong times on satellite for coverage of Pope's visit.
# This year, the ending date of DS was postponed to March 1
# to help dealing with the shortages of electric power.
#
# From Paul Eggert (1998-02-25):
# <a href="http://churchnet.ucsm.ac.uk/news/files2/news165.htm">
# Brazil Prepares for Papal Visit
# </a>,
# Church Net UK (1997-10-02).
#
# Decree 2,317 (1997-09-04), adopted by same states.
Rule Brazil 1997 only - Oct 6 0:00 1:00 S
# Decree <a href="http://pcdsh01.on.br/figuras/HV2495.JPG">2,495</a>
# (1998-02-10)
Rule Brazil 1998 only - Mar 1 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/figuras/Hv98.jpg">2,780</a> (1998-09-11)
# adopted by the same states as before.
Rule Brazil 1998 only - Oct 11 0:00 1:00 S
Rule Brazil 1999 only - Feb 21 0:00 0 -
# Decree <a href="http://pcdsh01.on.br/figuras/HV3150.gif">3,150</a>
# (1999-08-23) adopted by same states.
# Decree <a href="http://pcdsh01.on.br/DecHV99.gif">3,188</a> (1999-09-30)
# adds SE, AL, PE, PR, RN, CE, PI, MA and RR.
# These give only one year's rules. After that, the rules are guesses
# and are quite possibly wrong, but are more likely than no DST at all.
Rule Brazil 1999 max - Oct Sun>=1 0:00 1:00 S
Rule Brazil 2000 max - Feb lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
#
# Fernando de Noronha
Zone America/Noronha -2:09:40 - LMT 1914
-2:00 Brazil FN%sT 1990 Sep 17
-2:00 - FNT
#
# Amapa (AP), east Para (PA)
Zone America/Belem -3:13:56 - LMT 1914
-3:00 Brazil BR%sT 1988 Sep 12
-3:00 - BRT
#
# Maranhao (MA), Piaui (PI), Ceara (CE), Rio Grande do Norte (RN),
# Paraiba (PB), Pernambuco (PE) (except Fernando de Noronha)
Zone America/Fortaleza -2:34:00 - LMT 1914
-3:00 Brazil BR%sT 1990 Sep 17
-3:00 - BRT 1999 Sep 30
-3:00 Brazil BR%sT
#
# Tocantins (TO)
Zone America/Araguaina -3:12:48 - LMT 1914
-3:00 Brazil BR%sT 1990 Sep 17
-3:00 - BRT 1995 Sep 14
-3:00 Brazil BR%sT
#
# Alagoas (AL), Sergipe (SE)
Zone America/Maceio -2:22:52 - LMT 1914
-3:00 Brazil BR%sT 1990 Sep 17
-3:00 - BRT 1995 Oct 13
-3:00 Brazil BR%sT 1996 Sep 4
-3:00 - BRT 1999 Sep 30
-3:00 Brazil BR%sT
#
# Bahia (BA), Goias (GO), Distrito Federal (DF), Minas Gerais (MG),
# Espirito Santo (ES), Rio de Janeiro (RJ), Sao Paulo (SP), Parana (PR),
# Santa Catarina (SC), Rio Grande do Sul (RS)
Zone America/Sao_Paulo -3:06:28 - LMT 1914
-3:00 Brazil BR%sT 1963 Oct 23 00:00
-3:00 1:00 BRST 1964
-3:00 Brazil BR%sT
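#
# Reading a Zone block like the one above: each continuation line
# carries (GMTOFF, RULES/SAVE, FORMAT) and applies until its UNTIL
# column; the last line, having no UNTIL, applies from then on. A
# rough Python sketch at whole-year granularity (real UNTIL fields can
# also carry month, day and time, e.g. "1963 Oct 23 00:00"):

SAO_PAULO = [
    ("-3:06:28", "-",      "LMT",   1914),
    ("-3:00",    "Brazil", "BR%sT", 1963),
    ("-3:00",    "1:00",   "BRST",  1964),
    ("-3:00",    "Brazil", "BR%sT", None),  # open-ended final line
]

def line_in_effect(zone, year):
    for gmtoff, rules, fmt, until in zone:
        if until is None or year < until:
            return (gmtoff, rules, fmt)

assert line_in_effect(SAO_PAULO, 1999) == ("-3:00", "Brazil", "BR%sT")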
#
# Mato Grosso, Mato Grosso do Sul
Zone America/Cuiaba -3:44:20 - LMT 1914
-4:00 Brazil AM%sT
#
# west Para (PA), Rondonia (RO)
Zone America/Porto_Velho -4:15:36 - LMT 1914
-4:00 Brazil AM%sT 1988 Sep 12
-4:00 - AMT
#
# Roraima (RR)
Zone America/Boa_Vista -4:02:40 - LMT 1914
-4:00 Brazil AM%sT 1988 Sep 12
-4:00 - AMT 1999 Sep 30
-4:00 Brazil AM%sT
#
# Amazonas (AM)
Zone America/Manaus -4:00:04 - LMT 1914
-4:00 Brazil AM%sT 1988 Sep 12
-4:00 - AMT 1993 Sep 28
-4:00 Brazil AM%sT 1994 Sep 22
-4:00 - AMT
#
# Acre (AC)
# Rio_Branco is too ambiguous, since there's a Rio Branco in Uruguay too.
Zone America/Porto_Acre -4:31:12 - LMT 1914
-5:00 Brazil AC%sT 1988 Sep 12
-5:00 - ACT
#
# Martim Vaz and Trindade are like America/Noronha.
# Chile
# From Eduardo Krell (1995-10-19):
# The law says to switch to DST at midnight [24:00] on the second SATURDAY
# of October.... The law is the same for March and October.
# (1998-09-29):
# Because of the drought this year, the government decided to go into
# DST earlier (saturday 9/26 at 24:00). This is a one-time change only ...
# (unless there's another dry season next year, I guess).
# From Julio I. Pacheco Troncoso (1999-03-18):
# Because of the same drought, the government decided to end DST later,
# on April 3, (one-time change).
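#
# A note on the ON field used below: "Sun>=9" means the first Sunday
# on or after the 9th, which is how "midnight [24:00] of the second
# Saturday of October" is encoded, since 24:00 Saturday is 0:00 of the
# following Sunday. A quick Python check (illustrative only):

import datetime

def first_sunday_on_or_after(year, month, day):
    d = datetime.date(year, month, day)
    while d.weekday() != 6:              # Monday=0 ... Sunday=6
        d += datetime.timedelta(days=1)
    return d

# In 1999 the second Saturday of October was the 9th, so "Oct Sun>=9"
# at 0:00 names the same instant:
assert first_sunday_on_or_after(1999, 10, 9) == datetime.date(1999, 10, 10)
#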
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Chile 1918 only - Sep 1 0:00 1:00 S
Rule Chile 1919 only - Jul 2 0:00 0 -
Rule Chile 1927 1931 - Sep 1 0:00 1:00 S
Rule Chile 1928 1932 - Apr 1 0:00 0 -
Rule Chile 1969 1997 - Oct Sun>=9 0:00 1:00 S
Rule Chile 1970 1998 - Mar Sun>=9 0:00 0 -
Rule Chile 1998 only - Sep 27 0:00 1:00 S
Rule Chile 1999 only - Apr 4 0:00 0 -
Rule Chile 1999 max - Oct Sun>=9 0:00 1:00 S
Rule Chile 2000 max - Mar Sun>=9 0:00 0 -
# IATA SSIM anomalies: (1990-09) says 1990-09-16; (1992-02) says 1992-03-14;
# (1996-09) says 1998-03-08. Ignore these.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Santiago -4:42:40 - LMT 1890
-4:42:40 - SMT 1910 # Santiago Mean Time
-5:00 Chile CL%sT 1932 Sep # Chile Time
-4:00 Chile CL%sT
Zone Pacific/Easter -7:17:28 - LMT 1890 # Mataveri
-7:17:28 - MMT 1932 Sep # Mataveri Mean Time
-7:00 Chile EAS%sT 1982 Mar 14 # Easter I Time
-6:00 Chile EAS%sT
#
# Whitman says Juan Fernandez Is are like America/Santiago.
# San Ambrosio, San Felix
# no information; probably like America/Santiago
# Colombia
# Shanks specifies 24:00 for 1992 transition times; go with IATA,
# as it seems implausible to change clocks at midnight New Year's Eve.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule CO 1992 only - May 2 0:00 1:00 S
Rule CO 1992 only - Dec 31 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Bogota -4:56:20 - LMT 1884 Mar 13
-4:56:20 - BMT 1914 Nov 23 # Bogota Mean Time
-5:00 CO CO%sT # Colombia Time
# Malpelo, Providencia, San Andres
# no information; probably like America/Bogota
# Curacao
# Shanks says that Bottom and Oranjestad have been at -4:00 since
# standard time was introduced on 1912-03-02; and that Kralendijk and Rincon
# used Kralendijk Mean Time (-4:33:08) from 1912-02-02 to 1965-01-01.
# This all predates our 1970 cutoff, though.
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Curacao -4:35:44 - LMT 1912 Feb 12 # Willemstad
-4:30 - ANT 1965 # Netherlands Antilles Time
-4:00 - AST
# Ecuador
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guayaquil -5:19:20 - LMT 1890
-5:14:00 - QMT 1931 # Quito Mean Time
-5:00 - ECT # Ecuador Time
Zone Pacific/Galapagos -5:58:24 - LMT 1931 # Puerto Baquerizo Moreno
-5:00 - ECT 1986
-6:00 - GALT # Galapagos Time
# Falklands
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Falk 1937 1938 - Sep lastSun 0:00 1:00 S
Rule Falk 1938 1942 - Mar Sun>=19 0:00 0 -
Rule Falk 1939 only - Oct 1 0:00 1:00 S
Rule Falk 1940 1942 - Sep lastSun 0:00 1:00 S
Rule Falk 1943 only - Jan 1 0:00 0 -
Rule Falk 1983 only - Sep lastSun 0:00 1:00 S
Rule Falk 1984 1985 - Apr lastSun 0:00 0 -
Rule Falk 1984 only - Sep 16 0:00 1:00 S
Rule Falk 1985 1995 - Sep Sun>=9 0:00 1:00 S
Rule Falk 1986 max - Apr Sun>=16 0:00 0 -
Rule Falk 1996 max - Sep Sun>=8 0:00 1:00 S
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/Stanley -3:51:24 - LMT 1890
-3:51:24 - SMT 1912 Mar 12 # Stanley Mean Time
-4:00 Falk FK%sT 1983 May # Falkland Is Time
-3:00 Falk FK%sT 1985 Sep 15
-4:00 Falk FK%sT
# French Guiana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Cayenne -3:29:20 - LMT 1911 Jul
-4:00 - GFT 1967 Oct # French Guiana Time
-3:00 - GFT
# Guyana
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Guyana -3:52:40 - LMT 1915 Mar # Georgetown
-3:45 - GBGT 1966 May 26 # Br Guiana Time
-3:45 - GYT 1975 Jul 31 # Guyana Time
-3:00 - GYT 1991
# IATA SSIM (1996-06) says -4:00. Assume a 1991 switch.
-4:00 - GYT
# Paraguay
# From Paul Eggert (1999-10-29):
# Shanks (1999) says that spring transitions are from 01:00 -> 02:00,
# and autumn transitions are from 00:00 -> 23:00. Go with earlier
# editions of Shanks, and with the IATA, who say transitions occur at 00:00.
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Para 1975 1988 - Oct 1 0:00 1:00 S
Rule Para 1975 1978 - Mar 1 0:00 0 -
Rule Para 1979 1991 - Apr 1 0:00 0 -
Rule Para 1989 only - Oct 22 0:00 1:00 S
Rule Para 1990 only - Oct 1 0:00 1:00 S
Rule Para 1991 only - Oct 6 0:00 1:00 S
Rule Para 1992 only - Mar 1 0:00 0 -
Rule Para 1992 only - Oct 5 0:00 1:00 S
Rule Para 1993 only - Mar 31 0:00 0 -
Rule Para 1993 1995 - Oct 1 0:00 1:00 S
Rule Para 1994 1995 - Feb lastSun 0:00 0 -
Rule Para 1996 only - Mar 1 0:00 0 -
# IATA SSIM (1997-09) says Mar 1; go with Shanks.
Rule Para 1997 only - Feb lastSun 0:00 0 -
Rule Para 1998 only - Mar 1 0:00 0 -
Rule Para 1996 max - Oct Sun>=1 0:00 1:00 S
# IATA SSIM (1999-02) says lastSat, not lastSun; go with Shanks.
Rule Para 1999 max - Feb lastSun 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Asuncion -3:50:40 - LMT 1890
-3:50:40 - AMT 1931 Oct 10 # Asuncion Mean Time
-4:00 - PYT 1972 Oct # Paraguay Time
-3:00 - PYT 1974 Apr
-4:00 Para PY%sT
# Peru
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Peru 1938 only - Jan 1 0:00 1:00 S
Rule Peru 1938 only - Apr 1 0:00 0 -
Rule Peru 1938 1939 - Sep lastSun 0:00 1:00 S
Rule Peru 1939 1940 - Mar Sun>=24 0:00 0 -
Rule Peru 1987 only - Jan 1 0:00 1:00 S
Rule Peru 1987 only - Apr 1 0:00 0 -
Rule Peru 1990 only - Jan 1 0:00 1:00 S
Rule Peru 1990 only - Apr 1 0:00 0 -
Rule Peru 1993 only - Jan 1 0:00 1:00 S
Rule Peru 1993 only - Apr 1 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Lima -5:08:12 - LMT 1890
-5:09 - LMT 1908 Jul 28 # Lima Mean Time
-5:00 Peru PE%sT # Peru Time
# South Georgia
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone Atlantic/South_Georgia -2:26:08 - LMT 1890 # Grytviken
-2:00 - GST # South Georgia Time
# South Sandwich Is
# uninhabited; scientific personnel have wintered
# Suriname
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Paramaribo -3:40:40 - LMT 1911
-3:40:52 - PMT 1935 # Paramaribo Mean Time
-3:40:36 - PMT 1945 Oct # The capital moved?
-3:30 - NEGT 1975 Nov 20 # Dutch Guiana Time
-3:30 - SRT 1984 Oct # Suriname Time
-3:00 - SRT
# Trinidad and Tobago
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Port_of_Spain -4:06:04 - LMT 1912 Mar 2
-4:00 - AST
# Uruguay
# From Paul Eggert <eggert@twinsun.com> (1993-11-18):
# Uruguay wins the prize for the strangest peacetime manipulation of the rules.
# From Shanks:
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
# Whitman gives 1923 Oct 1; go with Shanks.
Rule Uruguay 1923 only - Oct 2 0:00 0:30 HS
Rule Uruguay 1924 1926 - Apr 1 0:00 0 -
Rule Uruguay 1924 1925 - Oct 1 0:00 0:30 HS
Rule Uruguay 1933 1935 - Oct lastSun 0:00 0:30 HS
# Shanks gives 1935 Apr 1 0:00 and 1936 Mar 30 0:00; go with Whitman.
Rule Uruguay 1934 1936 - Mar Sat>=25 23:30s 0 -
Rule Uruguay 1936 only - Nov 1 0:00 0:30 HS
Rule Uruguay 1937 1941 - Mar lastSun 0:00 0 -
# Whitman gives 1937 Oct 3; go with Shanks.
Rule Uruguay 1937 1940 - Oct lastSun 0:00 0:30 HS
# Whitman gives 1941 Oct 24 - 1942 Mar 27, 1942 Dec 14 - 1943 Apr 13,
# and 1943 Apr 13 ``to present time''; go with Shanks.
Rule Uruguay 1941 only - Aug 1 0:00 0 -
Rule Uruguay 1942 only - Jan 1 0:00 0:30 HS
Rule Uruguay 1942 only - Dec 14 0:00 1:00 S
Rule Uruguay 1943 only - Mar 14 0:00 0 -
Rule Uruguay 1959 only - May 24 0:00 1:00 S
Rule Uruguay 1959 only - Nov 15 0:00 0 -
Rule Uruguay 1960 only - Jan 17 0:00 1:00 S
Rule Uruguay 1960 only - Mar 6 0:00 0 -
Rule Uruguay 1965 1967 - Apr Sun>=1 0:00 1:00 S
Rule Uruguay 1965 only - Sep 26 0:00 0 -
Rule Uruguay 1966 1967 - Oct 31 0:00 0 -
Rule Uruguay 1968 1970 - May 27 0:00 0:30 HS
Rule Uruguay 1968 1970 - Dec 2 0:00 0 -
Rule Uruguay 1972 only - Apr 24 0:00 1:00 S
Rule Uruguay 1972 only - Aug 15 0:00 0 -
Rule Uruguay 1974 only - Mar 10 0:00 0:30 HS
Rule Uruguay 1974 only - Dec 22 0:00 1:00 S
Rule Uruguay 1976 only - Oct 1 0:00 0 -
Rule Uruguay 1977 only - Dec 4 0:00 1:00 S
Rule Uruguay 1978 only - Apr 1 0:00 0 -
Rule Uruguay 1979 only - Oct 1 0:00 1:00 S
Rule Uruguay 1980 only - May 1 0:00 0 -
Rule Uruguay 1987 only - Dec 14 0:00 1:00 S
Rule Uruguay 1988 only - Mar 14 0:00 0 -
Rule Uruguay 1988 only - Dec 11 0:00 1:00 S
Rule Uruguay 1989 only - Mar 12 0:00 0 -
Rule Uruguay 1989 only - Oct 29 0:00 1:00 S
# Shanks says no DST was observed in 1990/1 and 1991/2,
# and that 1992/3's DST was from 10-25 to 03-01. Go with IATA.
Rule Uruguay 1990 1992 - Mar Sun>=1 0:00 0 -
Rule Uruguay 1990 1991 - Oct Sun>=21 0:00 1:00 S
Rule Uruguay 1992 only - Oct 18 0:00 1:00 S
Rule Uruguay 1993 only - Feb 28 0:00 0 -
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Montevideo -3:44:44 - LMT 1898 Jun 28
-3:44:44 - MMT 1920 May 1 # Montevideo MT
-3:30 Uruguay UY%sT 1942 Dec 14 # Uruguay Time
-3:00 Uruguay UY%sT
# Venezuela
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
Zone America/Caracas -4:27:44 - LMT 1890
-4:27:44 - CMT 1912 Feb 12 # Caracas Mean Time
-4:30 - VET 1965 # Venezuela Time
-4:00 - VET
|
194,532,719,010,073 | # @(#)zone.tab 1.15
#
# TZ zone descriptions
#
# From Paul Eggert <eggert@twinsun.com> (1996-08-05):
#
# This file contains a table with the following columns:
# 1. ISO 3166 2-character country code. See the file `iso3166.tab'.
# 2. Latitude and longitude of the zone's principal location
# in ISO 6709 sign-degrees-minutes-seconds format,
# either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS,
# first latitude (+ is north), then longitude (+ is east).
# 3. Zone name used in value of TZ environment variable.
# 4. Comments; present if and only if the country has multiple rows.
#
# Columns are separated by a single tab.
# The table is sorted first by country, then an order within the country that
# (1) makes some geographical sense, and
# (2) puts the most populous zones first, where that does not contradict (1).
#
# Lines beginning with `#' are comments.
#
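# A small Python sketch of reading one row of this table (helper names
# are made up; fields are tab-separated, as stated above):

def parse_coord(s):
    # One signed ISO 6709 component, +-DDMM or +-DDDMM, optionally with
    # trailing SS, decoded to decimal degrees.
    sign = -1 if s[0] == "-" else 1
    digits = s[1:]
    if len(digits) <= 5:                         # DDMM or DDDMM
        deg, mnt, sec = digits[:-2], digits[-2:], "0"
    else:                                        # DDMMSS or DDDMMSS
        deg, mnt, sec = digits[:-4], digits[-4:-2], digits[-2:]
    return sign * (int(deg) + int(mnt) / 60 + int(sec) / 3600)

def split_latlon(coords):
    # The sign of the longitude begins the second component.
    i = max(coords.rfind("+"), coords.rfind("-"))
    return parse_coord(coords[:i]), parse_coord(coords[i:])

row = "AR\t-3436-05827\tAmerica/Buenos_Aires\tE Argentina (BA, DF, SC, TF)"
code, coords, tz = row.split("\t")[:3]
lat, lon = split_latlon(coords)                  # about -34.60, -58.45
#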
#country-
#code coordinates TZ comments
AD +4230+00131 Europe/Andorra
AE +2518+05518 Asia/Dubai
AF +3431+06912 Asia/Kabul
AG +1703-06148 America/Antigua
AI +1812-06304 America/Anguilla
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
AN +1211-06900 America/Curacao
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo McMurdo Station, Ross Island
AQ -9000+00000 Antarctica/South_Pole Amundsen-Scott Station, South Pole
AQ -6448-06406 Antarctica/Palmer Palmer Station, Anvers Island
AQ -6736+06253 Antarctica/Mawson Mawson Station, Holme Bay
AQ -6835+07758 Antarctica/Davis Davis Station, Vestfold Hills
AQ -6617+11031 Antarctica/Casey Casey Station, Bailey Peninsula
AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville Base, Terre Adelie
AQ -690022+0393524 Antarctica/Syowa Syowa Station, E Ongul I
AR -3436-05827 America/Buenos_Aires E Argentina (BA, DF, SC, TF)
AR -3257-06040 America/Rosario NE Argentina (SF, ER, CN, MN, CC, FM, LP, CH)
AR -3124-06411 America/Cordoba W Argentina (CB, SA, TM, LR, SJ, SL, NQ, RN)
AR -2411-06518 America/Jujuy Jujuy (JY)
AR -2828-06547 America/Catamarca Catamarca (CT)
AR -3253-06849 America/Mendoza Mendoza (MZ)
AS -1416-17042 Pacific/Pago_Pago
AT +4813+01620 Europe/Vienna
AU -3133+15905 Australia/Lord_Howe Lord Howe Island
AU -4253+14719 Australia/Hobart Tasmania
AU -3749+14458 Australia/Melbourne Victoria
AU -3352+15113 Australia/Sydney New South Wales - most locations
AU -3157+14127 Australia/Broken_Hill New South Wales - Broken Hill
AU -2728+15302 Australia/Brisbane Queensland - most locations
AU -2016+14900 Australia/Lindeman Queensland - Holiday Islands
AU -3455+13835 Australia/Adelaide South Australia
AU -1228+13050 Australia/Darwin Northern Territory
AU -3157+11551 Australia/Perth Western Australia
AW +1230-06858 America/Aruba
AZ +4023+04951 Asia/Baku
BA +4352+01825 Europe/Sarajevo
BB +1306-05937 America/Barbados
BD +2343+09025 Asia/Dacca
BE +5050+00420 Europe/Brussels
BF +1222-00131 Africa/Ouagadougou
BG +4241+02319 Europe/Sofia
BH +2623+05035 Asia/Bahrain
BI -0323+02922 Africa/Bujumbura
BJ +0629+00237 Africa/Porto-Novo
BM +3217-06446 Atlantic/Bermuda
BN +0456+11455 Asia/Brunei
BO -1630-06809 America/La_Paz
BR -0351-03225 America/Noronha Fernando de Noronha
BR -0127-04829 America/Belem Amapa, E Para
BR -0343-03830 America/Fortaleza NE Brazil (MA, PI, CE, RN, PB, PE)
BR -0712-04812 America/Araguaina Tocantins
BR -0940-03543 America/Maceio Alagoas, Sergipe
BR -2332-04637 America/Sao_Paulo S & SE Brazil (BA, GO, DF, MG, ES, RJ, SP, PR, SC, RS)
BR -1535-05605 America/Cuiaba Mato Grosso, Mato Grosso do Sul
BR -0846-06354 America/Porto_Velho W Para, Rondonia, Roraima
BR -0308-06001 America/Manaus Amazonas
BR -0934-06731 America/Porto_Acre Acre
BS +2505-07721 America/Nassau
BT +2728+08939 Asia/Thimbu
BW -2545+02555 Africa/Gaborone
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
CA +4734-05243 America/St_Johns Newfoundland Island
CA +4439-06336 America/Halifax Atlantic Time - Nova Scotia (most places), NB, W Labrador, E Quebec & PEI
CA +4612-05957 America/Glace_Bay Atlantic Time - Nova Scotia - places that did not observe DST 1966-1971
CA +5320-06025 America/Goose_Bay Atlantic Time - E Labrador
CA +4531-07334 America/Montreal Eastern Time - Ontario & Quebec - most locations
CA +4901-08816 America/Nipigon Eastern Time - Ontario & Quebec - places that did not observe DST 1967-1973
CA +4823-08915 America/Thunder_Bay Eastern Time - Thunder Bay, Ontario
CA +4953-09709 America/Winnipeg Central Time - Manitoba & west Ontario
CA +4843-09429 America/Rainy_River Central Time - Rainy River & Fort Frances, Ontario
CA +6608-06544 America/Pangnirtung Central Time - Pangnirtung, Nunavut
CA +6344-06828 America/Iqaluit Central Time - east Nunavut
CA +6245-09210 America/Rankin_Inlet Central Time - central Nunavut
CA +6903-10505 America/Cambridge_Bay Central Time - west Nunavut
CA +5024-10439 America/Regina Central Standard Time - Saskatchewan - most locations
CA +5017-10750 America/Swift_Current Central Standard Time - Saskatchewan - midwest
CA +5333-11328 America/Edmonton Mountain Time - Alberta, east British Columbia & west Saskatchewan
CA +6227-11421 America/Yellowknife Mountain Time - central Northwest Territories
CA +6825-11330 America/Inuvik Mountain Time - west Northwest Territories
CA +5946-12014 America/Dawson_Creek Mountain Standard Time - Dawson Creek & Fort Saint John, British Columbia
CA +4916-12307 America/Vancouver Pacific Time - west British Columbia
CA +6043-13503 America/Whitehorse Pacific Time - south Yukon
CA +6404-13925 America/Dawson Pacific Time - north Yukon
CC -1210+09655 Indian/Cocos
CD -0418+01518 Africa/Kinshasa west Dem. Rep. of Congo
CD -1140+02728 Africa/Lubumbashi east Dem. Rep. of Congo
CF +0422+01835 Africa/Bangui
CG -0416+01517 Africa/Brazzaville
CH +4723+00832 Europe/Zurich
CI +0519-00402 Africa/Abidjan
CK -2114-15946 Pacific/Rarotonga
CL -3327-07040 America/Santiago mainland
CL -2710-10927 Pacific/Easter Easter Island
CM +0403+00942 Africa/Douala
CN +4545+12641 Asia/Harbin north Manchuria
CN +3114+12128 Asia/Shanghai China coast
CN +2217+11409 Asia/Hong_Kong Hong Kong
CN +2934+10635 Asia/Chungking China mountains
CN +4348+08735 Asia/Urumqi Tibet & Xinjiang
CN +3929+07559 Asia/Kashgar Eastern Turkestan
CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
CV +1455-02331 Atlantic/Cape_Verde
CX -1025+10543 Indian/Christmas
CY +3510+03322 Asia/Nicosia
CZ +5005+01426 Europe/Prague
DE +5230+01322 Europe/Berlin
DJ +1136+04309 Africa/Djibouti
DK +5540+01235 Europe/Copenhagen
DM +1518-06124 America/Dominica
DO +1828-06954 America/Santo_Domingo
DZ +3647+00303 Africa/Algiers
EC -0210-07950 America/Guayaquil mainland
EC -0054-08936 Pacific/Galapagos Galapagos Islands
EE +5925+02445 Europe/Tallinn
EG +3003+03115 Africa/Cairo
EH +2709-01312 Africa/El_Aaiun
ER +1520+03853 Africa/Asmera
ES +4024-00341 Europe/Madrid mainland
ES +3553-00519 Africa/Ceuta Ceuta & Melilla
ES +2806-01524 Atlantic/Canary Canary Islands
ET +0902+03842 Africa/Addis_Ababa
FI +6010+02458 Europe/Helsinki
FJ -1808+17825 Pacific/Fiji
FK -5142-05751 Atlantic/Stanley
FM +0931+13808 Pacific/Yap Yap
FM +0725+15147 Pacific/Truk Truk (Chuuk)
FM +0658+15813 Pacific/Ponape Ponape (Pohnpei)
FM +0519+16259 Pacific/Kosrae Kosrae
FO +6201-00646 Atlantic/Faeroe
FR +4852+00220 Europe/Paris
GA +0023+00927 Africa/Libreville
GB +512830-0001845 Europe/London Great Britain
GB +5435-00555 Europe/Belfast Northern Ireland
GD +1203-06145 America/Grenada
GE +4143+04449 Asia/Tbilisi
GF +0456-05220 America/Cayenne
GH +0533-00013 Africa/Accra
GI +3608-00521 Europe/Gibraltar
GL +7030-02215 America/Scoresbysund east Greenland
GL +6411-05144 America/Godthab southwest Greenland
GL +7634-06847 America/Thule northwest Greenland
GM +1328-01639 Africa/Banjul
GN +0931-01343 Africa/Conakry
GP +1614-06132 America/Guadeloupe
GQ +0345+00847 Africa/Malabo
GR +3758+02343 Europe/Athens
GS -5416-03632 Atlantic/South_Georgia
GT +1438-09031 America/Guatemala
GU +1328+14445 Pacific/Guam
GW +1151-01535 Africa/Bissau
GY +0648-05810 America/Guyana
HN +1406-08713 America/Tegucigalpa
HR +4548+01558 Europe/Zagreb
HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java & Sumatra
ID -0507+11924 Asia/Ujung_Pandang Borneo & Celebes
ID -0232+14042 Asia/Jayapura Irian Jaya & the Moluccas
IE +5320-00615 Europe/Dublin
IL +3146+03514 Asia/Jerusalem
IN +2232+08822 Asia/Calcutta
IO -0720+07225 Indian/Chagos
IQ +3321+04425 Asia/Baghdad
IR +3540+05126 Asia/Tehran
IS +6409-02151 Atlantic/Reykjavik
IT +4154+01229 Europe/Rome
JM +1800-07648 America/Jamaica
JO +3157+03556 Asia/Amman
JP +353916+1394441 Asia/Tokyo
KE -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
KH +1133+10455 Asia/Phnom_Penh
KI +0125+17300 Pacific/Tarawa Gilbert Islands
KI -0308-17105 Pacific/Enderbury Phoenix Islands
KI +0152-15720 Pacific/Kiritimati Line Islands
KM -1141+04316 Indian/Comoro
KN +1718-06243 America/St_Kitts
KP +3901+12545 Asia/Pyongyang
KR +3733+12658 Asia/Seoul
KW +2920+04759 Asia/Kuwait
KY +1918-08123 America/Cayman
KZ +4315+07657 Asia/Almaty east Kazakhstan
KZ +5017+05710 Asia/Aqtobe central Kazakhstan
KZ +4431+05016 Asia/Aqtau west Kazakhstan
LA +1758+10236 Asia/Vientiane
LB +3353+03530 Asia/Beirut
LC +1401-06100 America/St_Lucia
LI +4709+00931 Europe/Vaduz
LK +0656+07951 Asia/Colombo
LR +0618-01047 Africa/Monrovia
LS -2928+02730 Africa/Maseru
LT +5441+02519 Europe/Vilnius
LU +4936+00609 Europe/Luxembourg
LV +5657+02406 Europe/Riga
LY +3254+01311 Africa/Tripoli
MA +3339-00735 Africa/Casablanca
MC +4342+00723 Europe/Monaco
MD +4700+02850 Europe/Chisinau
MG -1855+04731 Indian/Antananarivo
MH +0709+17112 Pacific/Majuro most locations
MH +0905+16720 Pacific/Kwajalein Kwajalein
MK +4159+02126 Europe/Skopje
ML +1239-00800 Africa/Bamako southwest Mali
ML +1446-00301 Africa/Timbuktu northeast Mali
MM +1647+09610 Asia/Rangoon
MN +4755+10653 Asia/Ulan_Bator
MO +2214+11335 Asia/Macao
MP +1512+14545 Pacific/Saipan
MQ +1436-06105 America/Martinique
MR +1806-01557 Africa/Nouakchott
MS +1644-06213 America/Montserrat
MT +3554+01431 Europe/Malta
MU -2010+05730 Indian/Mauritius
MV +0410+07330 Indian/Maldives
MW -1547+03500 Africa/Blantyre
MX +2105-08646 America/Cancun Eastern Time
MX +1924-09909 America/Mexico_City Central Time
MX +2313-10625 America/Mazatlan Mountain Time - most locations
MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua
MX +3152-11637 America/Ensenada Pacific Time - most locations
MX +3232-11701 America/Tijuana Pacific Time - north Baja California
MY +0310+10142 Asia/Kuala_Lumpur peninsular Malaysia
MY +0133+11020 Asia/Kuching Sabah & Sarawak
MZ -2558+03235 Africa/Maputo
NA -2234+01706 Africa/Windhoek
NC -2216+16530 Pacific/Noumea
NE +1331+00207 Africa/Niamey
NF -2903+16758 Pacific/Norfolk
NG +0627+00324 Africa/Lagos
NI +1209-08617 America/Managua
NL +5222+00454 Europe/Amsterdam
NO +5955+01045 Europe/Oslo
NP +2743+08519 Asia/Katmandu
NR -0031+16655 Pacific/Nauru
NU -1901+16955 Pacific/Niue
NZ -3652+17446 Pacific/Auckland most locations
NZ -4355-17630 Pacific/Chatham Chatham Islands
OM +2336+05835 Asia/Muscat
PA +0858-07932 America/Panama
PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby
PH +1435+12100 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
PN -2504-13005 Pacific/Pitcairn
PR +182806-0660622 America/Puerto_Rico
PS +3130+03428 Asia/Gaza
PT +3843-00908 Europe/Lisbon mainland
PT +3238-01654 Atlantic/Madeira Madeira Islands
PT +3744-02540 Atlantic/Azores Azores
PW +0720+13429 Pacific/Palau
PY -2516-05740 America/Asuncion
QA +2517+05132 Asia/Qatar
RE -2052+05528 Indian/Reunion
RO +4426+02606 Europe/Bucharest
RU +5443+02030 Europe/Kaliningrad Moscow-01 - Kaliningrad
RU +5545+03735 Europe/Moscow Moscow+00 - west Russia
RU +5312+05009 Europe/Samara Moscow+01 - Caspian Sea
RU +5651+06036 Asia/Yekaterinburg Moscow+02 - Urals
RU +5500+07324 Asia/Omsk Moscow+03 - west Siberia
RU +5502+08255 Asia/Novosibirsk Moscow+03 - Novosibirsk
RU +5601+09250 Asia/Krasnoyarsk Moscow+04 - Yenisei River
RU +5216+10420 Asia/Irkutsk Moscow+05 - Lake Baikal
RU +6200+12940 Asia/Yakutsk Moscow+06 - Lena River
RU +4310+13156 Asia/Vladivostok Moscow+07 - Amur River
RU +5934+15048 Asia/Magadan Moscow+08 - Magadan & Sakhalin
RU +5301+15839 Asia/Kamchatka Moscow+09 - Kamchatka
RU +6445+17729 Asia/Anadyr Moscow+10 - Bering Sea
RW -0157+03004 Africa/Kigali
SA +2438+04643 Asia/Riyadh
SB -0932+16012 Pacific/Guadalcanal
SC -0440+05528 Indian/Mahe
SD +1536+03232 Africa/Khartoum
SE +5920+01803 Europe/Stockholm
SG +0117+10351 Asia/Singapore
SH -1555-00542 Atlantic/St_Helena
SI +4603+01431 Europe/Ljubljana
SJ +7800+01600 Arctic/Longyearbyen Svalbard
SJ +7059-00805 Atlantic/Jan_Mayen Jan Mayen
SK +4809+01707 Europe/Bratislava
SL +0830-01315 Africa/Freetown
SM +4355+01228 Europe/San_Marino
SN +1440-01726 Africa/Dakar
SO +0204+04522 Africa/Mogadishu
SR +0550-05510 America/Paramaribo
ST +0020+00644 Africa/Sao_Tome
SV +1342-08912 America/El_Salvador
SY +3330+03618 Asia/Damascus
SZ -2618+03106 Africa/Mbabane
TC +2128-07108 America/Grand_Turk
TD +1207+01503 Africa/Ndjamena
TF -492110+0701303 Indian/Kerguelen
TG +0608+00113 Africa/Lome
TH +1345+10031 Asia/Bangkok
TJ +3835+06848 Asia/Dushanbe
TK -0922-17114 Pacific/Fakaofo
TM +3757+05823 Asia/Ashkhabad
TN +3648+01011 Africa/Tunis
TO -2110+17510 Pacific/Tongatapu
TR +4101+02858 Europe/Istanbul
TT +1039-06131 America/Port_of_Spain
TV -0831+17913 Pacific/Funafuti
TW +2503+12130 Asia/Taipei
TZ -0648+03917 Africa/Dar_es_Salaam
UA +5026+03031 Europe/Kiev most locations
UA +4457+03406 Europe/Simferopol Crimea
UG +0019+03225 Africa/Kampala
UM +1700-16830 Pacific/Johnston Johnston Atoll
UM +2813-17722 Pacific/Midway Midway Islands
UM +1917+16637 Pacific/Wake Wake Island
US +404251-0740023 America/New_York Eastern Time
US +421953-0830245 America/Detroit Eastern Time - Michigan - most locations
US +381515-0854534 America/Louisville Eastern Time - Louisville, Kentucky
US +394606-0860929 America/Indianapolis Eastern Standard Time - Indiana - most locations
US +382232-0862041 America/Indiana/Marengo Eastern Standard Time - Indiana - Crawford County
US +411745-0863730 America/Indiana/Knox Eastern Standard Time - Indiana - Starke County
US +384452-0850402 America/Indiana/Vevay Eastern Standard Time - Indiana - Switzerland County
US +415100-0873900 America/Chicago Central Time
US +450628-0873651 America/Menominee Central Time - Michigan - Wisconsin border
US +394421-1045903 America/Denver Mountain Time
US +433649-1161209 America/Boise Mountain Time - south Idaho & east Oregon
US +364708-1084111 America/Shiprock Mountain Time - Navajo
US +332654-1120424 America/Phoenix Mountain Standard Time - Arizona
US +340308-1181434 America/Los_Angeles Pacific Time
US +611305-1495401 America/Anchorage Alaska Time
US +581807-1342511 America/Juneau Alaska Time - Alaska panhandle
US +593249-1394338 America/Yakutat Alaska Time - Alaska panhandle neck
US +643004-1652423 America/Nome Alaska Time - west Alaska
US +515248-1763929 America/Adak Aleutian Islands
US +211825-1575130 Pacific/Honolulu Hawaii
UY -3453-05611 America/Montevideo
UZ +3940+06648 Asia/Samarkand west Uzbekistan
UZ +4120+06918 Asia/Tashkent east Uzbekistan
VA +4154+01227 Europe/Vatican
VC +1309-06114 America/St_Vincent
VE +1030-06656 America/Caracas
VG +1827-06437 America/Tortola
VI +1821-06456 America/St_Thomas
VN +1045+10640 Asia/Saigon
VU -1740+16825 Pacific/Efate
WF -1318-17610 Pacific/Wallis
WS -1350-17144 Pacific/Apia
YE +1245+04512 Asia/Aden
YT -1247+04514 Indian/Mayotte
YU +4450+02030 Europe/Belgrade
ZA -2615+02800 Africa/Johannesburg
ZM -1525+02817 Africa/Lusaka
ZW -1750+03103 Africa/Harare
| # @(#)zone.tab 1.16
#
# TZ zone descriptions
#
# From Paul Eggert <eggert@twinsun.com> (1996-08-05):
#
# This file contains a table with the following columns:
# 1. ISO 3166 2-character country code. See the file `iso3166.tab'.
# 2. Latitude and longitude of the zone's principal location
# in ISO 6709 sign-degrees-minutes-seconds format,
# either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS,
# first latitude (+ is north), then longitude (+ is east).
# 3. Zone name used in value of TZ environment variable.
# 4. Comments; present if and only if the country has multiple rows.
#
# Columns are separated by a single tab.
# The table is sorted first by country, then an order within the country that
# (1) makes some geographical sense, and
# (2) puts the most populous zones first, where that does not contradict (1).
#
# Lines beginning with `#' are comments.
#
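# The coordinates also explain the LMT lines in the companion zone
# files: local mean time is longitude / 15 hours, i.e. 4 minutes of
# time per degree. A quick Python check against America/Sao_Paulo,
# whose zone entry above gives LMT as -3:06:28 and whose row below
# gives longitude 46 deg 37 min west (-04637):

def lmt_west_seconds(deg, minutes):
    # Longitude in arc-seconds divided by 15 gives seconds of time.
    return (deg * 3600 + minutes * 60) // 15

s = lmt_west_seconds(46, 37)
print(f"-{s // 3600}:{s % 3600 // 60:02d}:{s % 60:02d}")   # -> -3:06:28
#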
#country-
#code coordinates TZ comments
AD +4230+00131 Europe/Andorra
AE +2518+05518 Asia/Dubai
AF +3431+06912 Asia/Kabul
AG +1703-06148 America/Antigua
AI +1812-06304 America/Anguilla
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
AN +1211-06900 America/Curacao
AO -0848+01314 Africa/Luanda
AQ -7750+16636 Antarctica/McMurdo McMurdo Station, Ross Island
AQ -9000+00000 Antarctica/South_Pole Amundsen-Scott Station, South Pole
AQ -6448-06406 Antarctica/Palmer Palmer Station, Anvers Island
AQ -6736+06253 Antarctica/Mawson Mawson Station, Holme Bay
AQ -6835+07758 Antarctica/Davis Davis Station, Vestfold Hills
AQ -6617+11031 Antarctica/Casey Casey Station, Bailey Peninsula
AQ -6640+14001 Antarctica/DumontDUrville Dumont-d'Urville Base, Terre Adelie
AQ -690022+0393524 Antarctica/Syowa Syowa Station, E Ongul I
AR -3436-05827 America/Buenos_Aires E Argentina (BA, DF, SC, TF)
AR -3257-06040 America/Rosario NE Argentina (SF, ER, CN, MN, CC, FM, LP, CH)
AR -3124-06411 America/Cordoba W Argentina (CB, SA, TM, LR, SJ, SL, NQ, RN)
AR -2411-06518 America/Jujuy Jujuy (JY)
AR -2828-06547 America/Catamarca Catamarca (CT)
AR -3253-06849 America/Mendoza Mendoza (MZ)
AS -1416-17042 Pacific/Pago_Pago
AT +4813+01620 Europe/Vienna
AU -3133+15905 Australia/Lord_Howe Lord Howe Island
AU -4253+14719 Australia/Hobart Tasmania
AU -3749+14458 Australia/Melbourne Victoria
AU -3352+15113 Australia/Sydney New South Wales - most locations
AU -3157+14127 Australia/Broken_Hill New South Wales - Broken Hill
AU -2728+15302 Australia/Brisbane Queensland - most locations
AU -2016+14900 Australia/Lindeman Queensland - Holiday Islands
AU -3455+13835 Australia/Adelaide South Australia
AU -1228+13050 Australia/Darwin Northern Territory
AU -3157+11551 Australia/Perth Western Australia
AW +1230-06858 America/Aruba
AZ +4023+04951 Asia/Baku
BA +4352+01825 Europe/Sarajevo
BB +1306-05937 America/Barbados
BD +2343+09025 Asia/Dacca
BE +5050+00420 Europe/Brussels
BF +1222-00131 Africa/Ouagadougou
BG +4241+02319 Europe/Sofia
BH +2623+05035 Asia/Bahrain
BI -0323+02922 Africa/Bujumbura
BJ +0629+00237 Africa/Porto-Novo
BM +3217-06446 Atlantic/Bermuda
BN +0456+11455 Asia/Brunei
BO -1630-06809 America/La_Paz
BR -0351-03225 America/Noronha Fernando de Noronha
BR -0127-04829 America/Belem Amapa, E Para
BR -0343-03830 America/Fortaleza NE Brazil (MA, PI, CE, RN, PB, PE)
BR -0712-04812 America/Araguaina Tocantins
BR -0940-03543 America/Maceio Alagoas, Sergipe
BR -2332-04637 America/Sao_Paulo S & SE Brazil (BA, GO, DF, MG, ES, RJ, SP, PR, SC, RS)
BR -1535-05605 America/Cuiaba Mato Grosso, Mato Grosso do Sul
BR -0846-06354 America/Porto_Velho W Para, Rondonia
BR +0249-06040 America/Boa_Vista Roraima
BR -0308-06001 America/Manaus Amazonas
BR -0934-06731 America/Porto_Acre Acre
BS +2505-07721 America/Nassau
BT +2728+08939 Asia/Thimbu
BW -2545+02555 Africa/Gaborone
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
CA +4734-05243 America/St_Johns Newfoundland Island
CA +4439-06336 America/Halifax Atlantic Time - Nova Scotia (most places), NB, W Labrador, E Quebec & PEI
CA +4612-05957 America/Glace_Bay Atlantic Time - Nova Scotia - places that did not observe DST 1966-1971
CA +5320-06025 America/Goose_Bay Atlantic Time - E Labrador
CA +4531-07334 America/Montreal Eastern Time - Ontario & Quebec - most locations
CA +4901-08816 America/Nipigon Eastern Time - Ontario & Quebec - places that did not observe DST 1967-1973
CA +4823-08915 America/Thunder_Bay Eastern Time - Thunder Bay, Ontario
CA +4953-09709 America/Winnipeg Central Time - Manitoba & west Ontario
CA +4843-09429 America/Rainy_River Central Time - Rainy River & Fort Frances, Ontario
CA +6608-06544 America/Pangnirtung Central Time - Pangnirtung, Nunavut
CA +6344-06828 America/Iqaluit Central Time - east Nunavut
CA +6245-09210 America/Rankin_Inlet Central Time - central Nunavut
CA +6903-10505 America/Cambridge_Bay Central Time - west Nunavut
CA +5024-10439 America/Regina Central Standard Time - Saskatchewan - most locations
CA +5017-10750 America/Swift_Current Central Standard Time - Saskatchewan - midwest
CA +5333-11328 America/Edmonton Mountain Time - Alberta, east British Columbia & west Saskatchewan
CA +6227-11421 America/Yellowknife Mountain Time - central Northwest Territories
CA +6825-11330 America/Inuvik Mountain Time - west Northwest Territories
CA +5946-12014 America/Dawson_Creek Mountain Standard Time - Dawson Creek & Fort Saint John, British Columbia
CA +4916-12307 America/Vancouver Pacific Time - west British Columbia
CA +6043-13503 America/Whitehorse Pacific Time - south Yukon
CA +6404-13925 America/Dawson Pacific Time - north Yukon
CC -1210+09655 Indian/Cocos
CD -0418+01518 Africa/Kinshasa west Dem. Rep. of Congo
CD -1140+02728 Africa/Lubumbashi east Dem. Rep. of Congo
CF +0422+01835 Africa/Bangui
CG -0416+01517 Africa/Brazzaville
CH +4723+00832 Europe/Zurich
CI +0519-00402 Africa/Abidjan
CK -2114-15946 Pacific/Rarotonga
CL -3327-07040 America/Santiago mainland
CL -2710-10927 Pacific/Easter Easter Island
CM +0403+00942 Africa/Douala
CN +4545+12641 Asia/Harbin north Manchuria
CN +3114+12128 Asia/Shanghai China coast
CN +2934+10635 Asia/Chungking China mountains
CN +4348+08735 Asia/Urumqi Tibet & Xinjiang
CN +3929+07559 Asia/Kashgar Eastern Turkestan
CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
CV +1455-02331 Atlantic/Cape_Verde
CX -1025+10543 Indian/Christmas
CY +3510+03322 Asia/Nicosia
CZ +5005+01426 Europe/Prague
DE +5230+01322 Europe/Berlin
DJ +1136+04309 Africa/Djibouti
DK +5540+01235 Europe/Copenhagen
DM +1518-06124 America/Dominica
DO +1828-06954 America/Santo_Domingo
DZ +3647+00303 Africa/Algiers
EC -0210-07950 America/Guayaquil mainland
EC -0054-08936 Pacific/Galapagos Galapagos Islands
EE +5925+02445 Europe/Tallinn
EG +3003+03115 Africa/Cairo
EH +2709-01312 Africa/El_Aaiun
ER +1520+03853 Africa/Asmera
ES +4024-00341 Europe/Madrid mainland
ES +3553-00519 Africa/Ceuta Ceuta & Melilla
ES +2806-01524 Atlantic/Canary Canary Islands
ET +0902+03842 Africa/Addis_Ababa
FI +6010+02458 Europe/Helsinki
FJ -1808+17825 Pacific/Fiji
FK -5142-05751 Atlantic/Stanley
FM +0931+13808 Pacific/Yap Yap
FM +0725+15147 Pacific/Truk Truk (Chuuk)
FM +0658+15813 Pacific/Ponape Ponape (Pohnpei)
FM +0519+16259 Pacific/Kosrae Kosrae
FO +6201-00646 Atlantic/Faeroe
FR +4852+00220 Europe/Paris
GA +0023+00927 Africa/Libreville
GB +512830-0001845 Europe/London Great Britain
GB +5435-00555 Europe/Belfast Northern Ireland
GD +1203-06145 America/Grenada
GE +4143+04449 Asia/Tbilisi
GF +0456-05220 America/Cayenne
GH +0533-00013 Africa/Accra
GI +3608-00521 Europe/Gibraltar
GL +7030-02215 America/Scoresbysund east Greenland
GL +6411-05144 America/Godthab southwest Greenland
GL +7634-06847 America/Thule northwest Greenland
GM +1328-01639 Africa/Banjul
GN +0931-01343 Africa/Conakry
GP +1614-06132 America/Guadeloupe
GQ +0345+00847 Africa/Malabo
GR +3758+02343 Europe/Athens
GS -5416-03632 Atlantic/South_Georgia
GT +1438-09031 America/Guatemala
GU +1328+14445 Pacific/Guam
GW +1151-01535 Africa/Bissau
GY +0648-05810 America/Guyana
HK +2217+11409 Asia/Hong_Kong
HN +1406-08713 America/Tegucigalpa
HR +4548+01558 Europe/Zagreb
HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java & Sumatra
ID -0507+11924 Asia/Ujung_Pandang Borneo & Celebes
ID -0232+14042 Asia/Jayapura Irian Jaya & the Moluccas
IE +5320-00615 Europe/Dublin
IL +3146+03514 Asia/Jerusalem
IN +2232+08822 Asia/Calcutta
IO -0720+07225 Indian/Chagos
IQ +3321+04425 Asia/Baghdad
IR +3540+05126 Asia/Tehran
IS +6409-02151 Atlantic/Reykjavik
IT +4154+01229 Europe/Rome
JM +1800-07648 America/Jamaica
JO +3157+03556 Asia/Amman
JP +353916+1394441 Asia/Tokyo
KE -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
KH +1133+10455 Asia/Phnom_Penh
KI +0125+17300 Pacific/Tarawa Gilbert Islands
KI -0308-17105 Pacific/Enderbury Phoenix Islands
KI +0152-15720 Pacific/Kiritimati Line Islands
KM -1141+04316 Indian/Comoro
KN +1718-06243 America/St_Kitts
KP +3901+12545 Asia/Pyongyang
KR +3733+12658 Asia/Seoul
KW +2920+04759 Asia/Kuwait
KY +1918-08123 America/Cayman
KZ +4315+07657 Asia/Almaty east Kazakhstan
KZ +5017+05710 Asia/Aqtobe central Kazakhstan
KZ +4431+05016 Asia/Aqtau west Kazakhstan
LA +1758+10236 Asia/Vientiane
LB +3353+03530 Asia/Beirut
LC +1401-06100 America/St_Lucia
LI +4709+00931 Europe/Vaduz
LK +0656+07951 Asia/Colombo
LR +0618-01047 Africa/Monrovia
LS -2928+02730 Africa/Maseru
LT +5441+02519 Europe/Vilnius
LU +4936+00609 Europe/Luxembourg
LV +5657+02406 Europe/Riga
LY +3254+01311 Africa/Tripoli
MA +3339-00735 Africa/Casablanca
MC +4342+00723 Europe/Monaco
MD +4700+02850 Europe/Chisinau
MG -1855+04731 Indian/Antananarivo
MH +0709+17112 Pacific/Majuro most locations
MH +0905+16720 Pacific/Kwajalein Kwajalein
MK +4159+02126 Europe/Skopje
ML +1239-00800 Africa/Bamako southwest Mali
ML +1446-00301 Africa/Timbuktu northeast Mali
MM +1647+09610 Asia/Rangoon
MN +4755+10653 Asia/Ulan_Bator
MO +2214+11335 Asia/Macao
MP +1512+14545 Pacific/Saipan
MQ +1436-06105 America/Martinique
MR +1806-01557 Africa/Nouakchott
MS +1644-06213 America/Montserrat
MT +3554+01431 Europe/Malta
MU -2010+05730 Indian/Mauritius
MV +0410+07330 Indian/Maldives
MW -1547+03500 Africa/Blantyre
MX +1924-09909 America/Mexico_City Central Time - most locations
MX +2105-08646 America/Cancun Central Time - Quintana Roo
MX +2313-10625 America/Mazatlan Mountain Time - most locations
MX +2838-10605 America/Chihuahua Mountain Time - Chihuahua
MX +3232-11701 America/Tijuana Pacific Time
MY +0310+10142 Asia/Kuala_Lumpur peninsular Malaysia
MY +0133+11020 Asia/Kuching Sabah & Sarawak
MZ -2558+03235 Africa/Maputo
NA -2234+01706 Africa/Windhoek
NC -2216+16530 Pacific/Noumea
NE +1331+00207 Africa/Niamey
NF -2903+16758 Pacific/Norfolk
NG +0627+00324 Africa/Lagos
NI +1209-08617 America/Managua
NL +5222+00454 Europe/Amsterdam
NO +5955+01045 Europe/Oslo
NP +2743+08519 Asia/Katmandu
NR -0031+16655 Pacific/Nauru
NU -1901+16955 Pacific/Niue
NZ -3652+17446 Pacific/Auckland most locations
NZ -4355-17630 Pacific/Chatham Chatham Islands
OM +2336+05835 Asia/Muscat
PA +0858-07932 America/Panama
PE -1203-07703 America/Lima
PF -1732-14934 Pacific/Tahiti Society Islands
PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby
PH +1435+12100 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
PN -2504-13005 Pacific/Pitcairn
PR +182806-0660622 America/Puerto_Rico
PS +3130+03428 Asia/Gaza
PT +3843-00908 Europe/Lisbon mainland
PT +3238-01654 Atlantic/Madeira Madeira Islands
PT +3744-02540 Atlantic/Azores Azores
PW +0720+13429 Pacific/Palau
PY -2516-05740 America/Asuncion
QA +2517+05132 Asia/Qatar
RE -2052+05528 Indian/Reunion
RO +4426+02606 Europe/Bucharest
RU +5443+02030 Europe/Kaliningrad Moscow-01 - Kaliningrad
RU +5545+03735 Europe/Moscow Moscow+00 - west Russia
RU +5312+05009 Europe/Samara Moscow+01 - Caspian Sea
RU +5651+06036 Asia/Yekaterinburg Moscow+02 - Urals
RU +5500+07324 Asia/Omsk Moscow+03 - west Siberia
RU +5502+08255 Asia/Novosibirsk Moscow+03 - Novosibirsk
RU +5601+09250 Asia/Krasnoyarsk Moscow+04 - Yenisei River
RU +5216+10420 Asia/Irkutsk Moscow+05 - Lake Baikal
RU +6200+12940 Asia/Yakutsk Moscow+06 - Lena River
RU +4310+13156 Asia/Vladivostok Moscow+07 - Amur River
RU +5934+15048 Asia/Magadan Moscow+08 - Magadan & Sakhalin
RU +5301+15839 Asia/Kamchatka Moscow+09 - Kamchatka
RU +6445+17729 Asia/Anadyr Moscow+10 - Bering Sea
RW -0157+03004 Africa/Kigali
SA +2438+04643 Asia/Riyadh
SB -0932+16012 Pacific/Guadalcanal
SC -0440+05528 Indian/Mahe
SD +1536+03232 Africa/Khartoum
SE +5920+01803 Europe/Stockholm
SG +0117+10351 Asia/Singapore
SH -1555-00542 Atlantic/St_Helena
SI +4603+01431 Europe/Ljubljana
SJ +7800+01600 Arctic/Longyearbyen Svalbard
SJ +7059-00805 Atlantic/Jan_Mayen Jan Mayen
SK +4809+01707 Europe/Bratislava
SL +0830-01315 Africa/Freetown
SM +4355+01228 Europe/San_Marino
SN +1440-01726 Africa/Dakar
SO +0204+04522 Africa/Mogadishu
SR +0550-05510 America/Paramaribo
ST +0020+00644 Africa/Sao_Tome
SV +1342-08912 America/El_Salvador
SY +3330+03618 Asia/Damascus
SZ -2618+03106 Africa/Mbabane
TC +2128-07108 America/Grand_Turk
TD +1207+01503 Africa/Ndjamena
TF -492110+0701303 Indian/Kerguelen
TG +0608+00113 Africa/Lome
TH +1345+10031 Asia/Bangkok
TJ +3835+06848 Asia/Dushanbe
TK -0922-17114 Pacific/Fakaofo
TM +3757+05823 Asia/Ashkhabad
TN +3648+01011 Africa/Tunis
TO -2110+17510 Pacific/Tongatapu
TP -0833+12535 Asia/Dili
TR +4101+02858 Europe/Istanbul
TT +1039-06131 America/Port_of_Spain
TV -0831+17913 Pacific/Funafuti
TW +2503+12130 Asia/Taipei
TZ -0648+03917 Africa/Dar_es_Salaam
UA +5026+03031 Europe/Kiev most locations
UA +4457+03406 Europe/Simferopol Crimea
UG +0019+03225 Africa/Kampala
UM +1700-16830 Pacific/Johnston Johnston Atoll
UM +2813-17722 Pacific/Midway Midway Islands
UM +1917+16637 Pacific/Wake Wake Island
US +404251-0740023 America/New_York Eastern Time
US +421953-0830245 America/Detroit Eastern Time - Michigan - most locations
US +381515-0854534 America/Louisville Eastern Time - Louisville, Kentucky
US +394606-0860929 America/Indianapolis Eastern Standard Time - Indiana - most locations
US +382232-0862041 America/Indiana/Marengo Eastern Standard Time - Indiana - Crawford County
US +411745-0863730 America/Indiana/Knox Eastern Standard Time - Indiana - Starke County
US +384452-0850402 America/Indiana/Vevay Eastern Standard Time - Indiana - Switzerland County
US +415100-0873900 America/Chicago Central Time
US +450628-0873651 America/Menominee Central Time - Michigan - Wisconsin border
US +394421-1045903 America/Denver Mountain Time
US +433649-1161209 America/Boise Mountain Time - south Idaho & east Oregon
US +364708-1084111 America/Shiprock Mountain Time - Navajo
US +332654-1120424 America/Phoenix Mountain Standard Time - Arizona
US +340308-1181434 America/Los_Angeles Pacific Time
US +611305-1495401 America/Anchorage Alaska Time
US +581807-1342511 America/Juneau Alaska Time - Alaska panhandle
US +593249-1394338 America/Yakutat Alaska Time - Alaska panhandle neck
US +643004-1652423 America/Nome Alaska Time - west Alaska
US +515248-1763929 America/Adak Aleutian Islands
US +211825-1575130 Pacific/Honolulu Hawaii
UY -3453-05611 America/Montevideo
UZ +3940+06648 Asia/Samarkand west Uzbekistan
UZ +4120+06918 Asia/Tashkent east Uzbekistan
VA +4154+01227 Europe/Vatican
VC +1309-06114 America/St_Vincent
VE +1030-06656 America/Caracas
VG +1827-06437 America/Tortola
VI +1821-06456 America/St_Thomas
VN +1045+10640 Asia/Saigon
VU -1740+16825 Pacific/Efate
WF -1318-17610 Pacific/Wallis
WS -1350-17144 Pacific/Apia
YE +1245+04512 Asia/Aden
YT -1247+04514 Indian/Mayotte
YU +4450+02030 Europe/Belgrade
ZA -2615+02800 Africa/Johannesburg
ZM -1525+02817 Africa/Lusaka
ZW -1750+03103 Africa/Harare
|
164,057,968,633,021 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "k5-int.h"
#include <kadm5/admin.h>
#include <syslog.h>
#include <adm_proto.h> /* krb5_klog_syslog */
#include <stdio.h>
#include <errno.h>
#include "kadm5/server_internal.h" /* XXX for kadm5_server_handle_t */
#include "misc.h"
#ifndef GETSOCKNAME_ARG3_TYPE
#define GETSOCKNAME_ARG3_TYPE int
#endif
#define RFC3244_VERSION 0xff80
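/* Protocol version 1 is the original kpasswd change-password protocol;
   0xff80 identifies an RFC 3244 set/change-password request. */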
static krb5_error_code
process_chpw_request(krb5_context context, void *server_handle, char *realm,
krb5_keytab keytab, const krb5_fulladdr *local_faddr,
const krb5_fulladdr *remote_faddr, krb5_data *req,
krb5_data *rep)
{
krb5_error_code ret;
char *ptr;
unsigned int plen, vno;
krb5_data ap_req, ap_rep = empty_data();
krb5_data cipher = empty_data(), clear = empty_data();
krb5_auth_context auth_context = NULL;
krb5_principal changepw = NULL;
krb5_principal client, target = NULL;
krb5_ticket *ticket = NULL;
krb5_replay_data replay;
krb5_error krberror;
int numresult;
char strresult[1024];
char *clientstr = NULL, *targetstr = NULL;
const char *errmsg = NULL;
size_t clen;
char *cdots;
struct sockaddr_storage ss;
socklen_t salen;
char addrbuf[100];
krb5_address *addr = remote_faddr->address;
*rep = empty_data();
if (req->length < 4) {
/* either this, or the server is printing bad messages,
or the caller passed in garbage */
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated", sizeof(strresult));
goto chpwfail;
}
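/* Request layout, as parsed below: 2-byte total length, 2-byte protocol
   version, 2-byte AP-REQ length, the AP-REQ itself, then a KRB-PRIV
   (ChangePasswdData) covering the rest of the message. */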
ptr = req->data;
/* verify length */
plen = (*ptr++ & 0xff);
plen = (plen<<8) | (*ptr++ & 0xff);
if (plen != req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request length was inconsistent",
sizeof(strresult));
goto chpwfail;
}
/* verify version number */
vno = (*ptr++ & 0xff) ;
vno = (vno<<8) | (*ptr++ & 0xff);
if (vno != 1 && vno != RFC3244_VERSION) {
ret = KRB5KDC_ERR_BAD_PVNO;
numresult = KRB5_KPASSWD_BAD_VERSION;
snprintf(strresult, sizeof(strresult),
"Request contained unknown protocol version number %d", vno);
goto chpwfail;
}
/* read, check ap-req length */
ap_req.length = (*ptr++ & 0xff);
ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff);
if (ptr + ap_req.length >= req->data + req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated in AP-REQ",
sizeof(strresult));
goto chpwfail;
}
/* verify ap_req */
ap_req.data = ptr;
ptr += ap_req.length;
ret = krb5_auth_con_init(context, &auth_context);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_auth_con_setflags(context, auth_context,
KRB5_AUTH_CONTEXT_DO_SEQUENCE);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_build_principal(context, &changepw, strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed building kadmin/changepw principal",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab,
NULL, &ticket);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed reading application request",
sizeof(strresult));
goto chpwfail;
}
/* construct the ap-rep */
ret = krb5_mk_rep(context, auth_context, &ap_rep);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed replying to application request",
sizeof(strresult));
goto chpwfail;
}
/* decrypt the ChangePasswdData */
cipher.length = (req->data + req->length) - ptr;
cipher.data = ptr;
/*
* Don't set a remote address in auth_context before calling krb5_rd_priv,
* so that we can work against clients behind a NAT. Reflection attacks
* aren't a concern since we use sequence numbers and since our requests
* don't look anything like our responses. Also don't set a local address,
* since we don't know what interface the request was received on.
*/
ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed decrypting request", sizeof(strresult));
goto chpwfail;
}
client = ticket->enc_part2->client;
/* decode ChangePasswdData for setpw requests */
if (vno == RFC3244_VERSION) {
krb5_data *clear_data;
ret = decode_krb5_setpw_req(&clear, &clear_data, &target);
if (ret != 0) {
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Failed decoding ChangePasswdData",
sizeof(strresult));
goto chpwfail;
}
zapfree(clear.data, clear.length);
clear = *clear_data;
free(clear_data);
if (target != NULL) {
ret = krb5_unparse_name(context, target, &targetstr);
if (ret != 0) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing target name for log",
sizeof(strresult));
goto chpwfail;
}
}
}
ret = krb5_unparse_name(context, client, &clientstr);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing client name for log",
sizeof(strresult));
goto chpwfail;
}
/* for cpw, verify that this is an AS_REQ ticket */
if (vno == 1 &&
(ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) {
numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED;
strlcpy(strresult, "Ticket must be derived from a password",
sizeof(strresult));
goto chpwfail;
}
/* change the password */
ptr = k5memdup0(clear.data, clear.length, &ret);
ret = schpw_util_wrapper(server_handle, client, target,
(ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0,
ptr, NULL, strresult, sizeof(strresult));
if (ret)
errmsg = krb5_get_error_message(context, ret);
/* zap the password */
zapfree(clear.data, clear.length);
zapfree(ptr, clear.length);
clear = empty_data();
clen = strlen(clientstr);
trunc_name(&clen, &cdots);
switch (addr->addrtype) {
case ADDRTYPE_INET: {
struct sockaddr_in *sin = ss2sin(&ss);
sin->sin_family = AF_INET;
memcpy(&sin->sin_addr, addr->contents, addr->length);
sin->sin_port = htons(remote_faddr->port);
salen = sizeof(*sin);
break;
}
case ADDRTYPE_INET6: {
struct sockaddr_in6 *sin6 = ss2sin6(&ss);
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, addr->contents, addr->length);
sin6->sin6_port = htons(remote_faddr->port);
salen = sizeof(*sin6);
break;
}
default: {
struct sockaddr *sa = ss2sa(&ss);
sa->sa_family = AF_UNSPEC;
salen = sizeof(*sa);
break;
}
}
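/* The sockaddr rebuilt above is used only so getnameinfo can render the
   client's numeric address for the syslog messages below. */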
if (getnameinfo(ss2sa(&ss), salen,
addrbuf, sizeof(addrbuf), NULL, 0,
NI_NUMERICHOST | NI_NUMERICSERV) != 0)
strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf));
if (vno == RFC3244_VERSION) {
size_t tlen;
char *tdots;
const char *targetp;
if (target == NULL) {
tlen = clen;
tdots = cdots;
targetp = targetstr;
} else {
tlen = strlen(targetstr);
trunc_name(&tlen, &tdots);
targetp = clientstr;
}
krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for "
"%.*s%s: %s"), addrbuf, (int) clen,
clientstr, cdots, (int) tlen, targetp, tdots,
errmsg ? errmsg : "success");
} else {
krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"),
addrbuf, (int) clen, clientstr, cdots,
errmsg ? errmsg : "success");
}
switch (ret) {
case KADM5_AUTH_CHANGEPW:
numresult = KRB5_KPASSWD_ACCESSDENIED;
break;
case KADM5_PASS_Q_TOOSHORT:
case KADM5_PASS_REUSE:
case KADM5_PASS_Q_CLASS:
case KADM5_PASS_Q_DICT:
case KADM5_PASS_Q_GENERIC:
case KADM5_PASS_TOOSOON:
numresult = KRB5_KPASSWD_SOFTERROR;
break;
case 0:
numresult = KRB5_KPASSWD_SUCCESS;
strlcpy(strresult, "", sizeof(strresult));
break;
default:
numresult = KRB5_KPASSWD_HARDERROR;
break;
}
chpwfail:
clear.length = 2 + strlen(strresult);
clear.data = (char *) malloc(clear.length);
ptr = clear.data;
*ptr++ = (numresult>>8) & 0xff;
*ptr++ = numresult & 0xff;
memcpy(ptr, strresult, strlen(strresult));
cipher = empty_data();
if (ap_rep.length) {
ret = krb5_auth_con_setaddrs(context, auth_context,
local_faddr->address, NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult,
"Failed storing client and server internet addresses",
sizeof(strresult));
} else {
ret = krb5_mk_priv(context, auth_context, &clear, &cipher,
&replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed encrypting reply",
sizeof(strresult));
}
}
}
/* if no KRB-PRIV was constructed, then we need a KRB-ERROR.
if this fails, just bail. there's nothing else we can do. */
if (cipher.length == 0) {
/* clear out ap_rep now, so that it won't be inserted in the
reply */
if (ap_rep.length) {
free(ap_rep.data);
ap_rep = empty_data();
}
krberror.ctime = 0;
krberror.cusec = 0;
krberror.susec = 0;
ret = krb5_timeofday(context, &krberror.stime);
if (ret)
goto bailout;
/* this is really icky. but it's what all the other callers
to mk_error do. */
krberror.error = ret;
krberror.error -= ERROR_TABLE_BASE_krb5;
if (krberror.error < 0 || krberror.error > 128)
krberror.error = KRB_ERR_GENERIC;
krberror.client = NULL;
ret = krb5_build_principal(context, &krberror.server,
strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret)
goto bailout;
krberror.text.length = 0;
krberror.e_data = clear;
ret = krb5_mk_error(context, &krberror, &cipher);
krb5_free_principal(context, krberror.server);
if (ret)
goto bailout;
}
/* construct the reply */
ret = alloc_data(rep, 6 + ap_rep.length + cipher.length);
if (ret)
goto bailout;
ptr = rep->data;
/* length */
*ptr++ = (rep->length>>8) & 0xff;
*ptr++ = rep->length & 0xff;
/* version == 0x0001 big-endian */
*ptr++ = 0;
*ptr++ = 1;
/* ap_rep length, big-endian */
*ptr++ = (ap_rep.length>>8) & 0xff;
*ptr++ = ap_rep.length & 0xff;
/* ap-rep data */
if (ap_rep.length) {
memcpy(ptr, ap_rep.data, ap_rep.length);
ptr += ap_rep.length;
}
/* krb-priv or krb-error */
memcpy(ptr, cipher.data, cipher.length);
bailout:
krb5_auth_con_free(context, auth_context);
krb5_free_principal(context, changepw);
krb5_free_ticket(context, ticket);
free(ap_rep.data);
free(clear.data);
free(cipher.data);
krb5_free_principal(context, target);
krb5_free_unparsed_name(context, targetstr);
krb5_free_unparsed_name(context, clientstr);
krb5_free_error_message(context, errmsg);
return ret;
}
/* Dispatch routine for set/change password */
void
dispatch(void *handle, struct sockaddr *local_saddr,
const krb5_fulladdr *remote_faddr, krb5_data *request, int is_tcp,
verto_ctx *vctx, loop_respond_fn respond, void *arg)
{
krb5_error_code ret;
krb5_keytab kt = NULL;
kadm5_server_handle_t server_handle = (kadm5_server_handle_t)handle;
krb5_fulladdr local_faddr;
krb5_address **local_kaddrs = NULL, local_kaddr_buf;
krb5_data *response = NULL;
if (local_saddr == NULL) {
ret = krb5_os_localaddr(server_handle->context, &local_kaddrs);
if (ret != 0)
goto egress;
local_faddr.address = local_kaddrs[0];
local_faddr.port = 0;
} else {
local_faddr.address = &local_kaddr_buf;
init_addr(&local_faddr, local_saddr);
}
ret = krb5_kt_resolve(server_handle->context, "KDB:", &kt);
if (ret != 0) {
krb5_klog_syslog(LOG_ERR, _("chpw: Couldn't open admin keytab %s"),
krb5_get_error_message(server_handle->context, ret));
goto egress;
}
response = k5alloc(sizeof(krb5_data), &ret);
if (response == NULL)
goto egress;
ret = process_chpw_request(server_handle->context,
handle,
server_handle->params.realm,
kt,
&local_faddr,
remote_faddr,
request,
response);
egress:
if (ret)
krb5_free_data(server_handle->context, response);
krb5_free_addresses(server_handle->context, local_kaddrs);
krb5_kt_close(server_handle->context, kt);
(*respond)(arg, ret, ret == 0 ? response : NULL);
}
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "k5-int.h"
#include <kadm5/admin.h>
#include <syslog.h>
#include <adm_proto.h> /* krb5_klog_syslog */
#include <stdio.h>
#include <errno.h>
#include "kadm5/server_internal.h" /* XXX for kadm5_server_handle_t */
#include "misc.h"
#ifndef GETSOCKNAME_ARG3_TYPE
#define GETSOCKNAME_ARG3_TYPE int
#endif
#define RFC3244_VERSION 0xff80
static krb5_error_code
process_chpw_request(krb5_context context, void *server_handle, char *realm,
krb5_keytab keytab, const krb5_fulladdr *local_faddr,
const krb5_fulladdr *remote_faddr, krb5_data *req,
krb5_data *rep)
{
krb5_error_code ret;
char *ptr;
unsigned int plen, vno;
krb5_data ap_req, ap_rep = empty_data();
krb5_data cipher = empty_data(), clear = empty_data();
krb5_auth_context auth_context = NULL;
krb5_principal changepw = NULL;
krb5_principal client, target = NULL;
krb5_ticket *ticket = NULL;
krb5_replay_data replay;
krb5_error krberror;
int numresult;
char strresult[1024];
char *clientstr = NULL, *targetstr = NULL;
const char *errmsg = NULL;
size_t clen;
char *cdots;
struct sockaddr_storage ss;
socklen_t salen;
char addrbuf[100];
krb5_address *addr = remote_faddr->address;
*rep = empty_data();
if (req->length < 4) {
/* either this, or the server is printing bad messages,
or the caller passed in garbage */
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated", sizeof(strresult));
goto bailout;
}
ptr = req->data;
/* verify length */
plen = (*ptr++ & 0xff);
plen = (plen<<8) | (*ptr++ & 0xff);
if (plen != req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request length was inconsistent",
sizeof(strresult));
goto bailout;
}
/* verify version number */
vno = (*ptr++ & 0xff) ;
vno = (vno<<8) | (*ptr++ & 0xff);
if (vno != 1 && vno != RFC3244_VERSION) {
ret = KRB5KDC_ERR_BAD_PVNO;
numresult = KRB5_KPASSWD_BAD_VERSION;
snprintf(strresult, sizeof(strresult),
"Request contained unknown protocol version number %d", vno);
goto bailout;
}
/* read, check ap-req length */
ap_req.length = (*ptr++ & 0xff);
ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff);
if (ptr + ap_req.length >= req->data + req->length) {
ret = KRB5KRB_AP_ERR_MODIFIED;
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Request was truncated in AP-REQ",
sizeof(strresult));
goto bailout;
}
/* verify ap_req */
ap_req.data = ptr;
ptr += ap_req.length;
ret = krb5_auth_con_init(context, &auth_context);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_auth_con_setflags(context, auth_context,
KRB5_AUTH_CONTEXT_DO_SEQUENCE);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed initializing auth context",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_build_principal(context, &changepw, strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed building kadmin/changepw principal",
sizeof(strresult));
goto chpwfail;
}
ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab,
NULL, &ticket);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed reading application request",
sizeof(strresult));
goto chpwfail;
}
/* construct the ap-rep */
ret = krb5_mk_rep(context, auth_context, &ap_rep);
if (ret) {
numresult = KRB5_KPASSWD_AUTHERROR;
strlcpy(strresult, "Failed replying to application request",
sizeof(strresult));
goto chpwfail;
}
/* decrypt the ChangePasswdData */
cipher.length = (req->data + req->length) - ptr;
cipher.data = ptr;
/*
* Don't set a remote address in auth_context before calling krb5_rd_priv,
* so that we can work against clients behind a NAT. Reflection attacks
* aren't a concern since we use sequence numbers and since our requests
* don't look anything like our responses. Also don't set a local address,
* since we don't know what interface the request was received on.
*/
ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed decrypting request", sizeof(strresult));
goto chpwfail;
}
client = ticket->enc_part2->client;
/* decode ChangePasswdData for setpw requests */
if (vno == RFC3244_VERSION) {
krb5_data *clear_data;
ret = decode_krb5_setpw_req(&clear, &clear_data, &target);
if (ret != 0) {
numresult = KRB5_KPASSWD_MALFORMED;
strlcpy(strresult, "Failed decoding ChangePasswdData",
sizeof(strresult));
goto chpwfail;
}
zapfree(clear.data, clear.length);
clear = *clear_data;
free(clear_data);
if (target != NULL) {
ret = krb5_unparse_name(context, target, &targetstr);
if (ret != 0) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing target name for log",
sizeof(strresult));
goto chpwfail;
}
}
}
ret = krb5_unparse_name(context, client, &clientstr);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed unparsing client name for log",
sizeof(strresult));
goto chpwfail;
}
/* for cpw, verify that this is an AS_REQ ticket */
if (vno == 1 &&
(ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) {
numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED;
strlcpy(strresult, "Ticket must be derived from a password",
sizeof(strresult));
goto chpwfail;
}
/* change the password */
ptr = k5memdup0(clear.data, clear.length, &ret);
ret = schpw_util_wrapper(server_handle, client, target,
(ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0,
ptr, NULL, strresult, sizeof(strresult));
if (ret)
errmsg = krb5_get_error_message(context, ret);
/* zap the password */
zapfree(clear.data, clear.length);
zapfree(ptr, clear.length);
clear = empty_data();
clen = strlen(clientstr);
trunc_name(&clen, &cdots);
switch (addr->addrtype) {
case ADDRTYPE_INET: {
struct sockaddr_in *sin = ss2sin(&ss);
sin->sin_family = AF_INET;
memcpy(&sin->sin_addr, addr->contents, addr->length);
sin->sin_port = htons(remote_faddr->port);
salen = sizeof(*sin);
break;
}
case ADDRTYPE_INET6: {
struct sockaddr_in6 *sin6 = ss2sin6(&ss);
sin6->sin6_family = AF_INET6;
memcpy(&sin6->sin6_addr, addr->contents, addr->length);
sin6->sin6_port = htons(remote_faddr->port);
salen = sizeof(*sin6);
break;
}
default: {
struct sockaddr *sa = ss2sa(&ss);
sa->sa_family = AF_UNSPEC;
salen = sizeof(*sa);
break;
}
}
if (getnameinfo(ss2sa(&ss), salen,
addrbuf, sizeof(addrbuf), NULL, 0,
NI_NUMERICHOST | NI_NUMERICSERV) != 0)
strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf));
if (vno == RFC3244_VERSION) {
size_t tlen;
char *tdots;
const char *targetp;
if (target == NULL) {
tlen = clen;
tdots = cdots;
targetp = targetstr;
} else {
tlen = strlen(targetstr);
trunc_name(&tlen, &tdots);
targetp = clientstr;
}
krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for "
"%.*s%s: %s"), addrbuf, (int) clen,
clientstr, cdots, (int) tlen, targetp, tdots,
errmsg ? errmsg : "success");
} else {
krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"),
addrbuf, (int) clen, clientstr, cdots,
errmsg ? errmsg : "success");
}
switch (ret) {
case KADM5_AUTH_CHANGEPW:
numresult = KRB5_KPASSWD_ACCESSDENIED;
break;
case KADM5_PASS_Q_TOOSHORT:
case KADM5_PASS_REUSE:
case KADM5_PASS_Q_CLASS:
case KADM5_PASS_Q_DICT:
case KADM5_PASS_Q_GENERIC:
case KADM5_PASS_TOOSOON:
numresult = KRB5_KPASSWD_SOFTERROR;
break;
case 0:
numresult = KRB5_KPASSWD_SUCCESS;
strlcpy(strresult, "", sizeof(strresult));
break;
default:
numresult = KRB5_KPASSWD_HARDERROR;
break;
}
chpwfail:
clear.length = 2 + strlen(strresult);
clear.data = (char *) malloc(clear.length);
ptr = clear.data;
*ptr++ = (numresult>>8) & 0xff;
*ptr++ = numresult & 0xff;
memcpy(ptr, strresult, strlen(strresult));
cipher = empty_data();
if (ap_rep.length) {
ret = krb5_auth_con_setaddrs(context, auth_context,
local_faddr->address, NULL);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult,
"Failed storing client and server internet addresses",
sizeof(strresult));
} else {
ret = krb5_mk_priv(context, auth_context, &clear, &cipher,
&replay);
if (ret) {
numresult = KRB5_KPASSWD_HARDERROR;
strlcpy(strresult, "Failed encrypting reply",
sizeof(strresult));
}
}
}
/* if no KRB-PRIV was constructed, then we need a KRB-ERROR.
if this fails, just bail. there's nothing else we can do. */
if (cipher.length == 0) {
/* clear out ap_rep now, so that it won't be inserted in the
reply */
if (ap_rep.length) {
free(ap_rep.data);
ap_rep = empty_data();
}
krberror.ctime = 0;
krberror.cusec = 0;
krberror.susec = 0;
ret = krb5_timeofday(context, &krberror.stime);
if (ret)
goto bailout;
/* this is really icky. but it's what all the other callers
to mk_error do. */
krberror.error = ret;
krberror.error -= ERROR_TABLE_BASE_krb5;
if (krberror.error < 0 || krberror.error > 128)
krberror.error = KRB_ERR_GENERIC;
krberror.client = NULL;
ret = krb5_build_principal(context, &krberror.server,
strlen(realm), realm,
"kadmin", "changepw", NULL);
if (ret)
goto bailout;
krberror.text.length = 0;
krberror.e_data = clear;
ret = krb5_mk_error(context, &krberror, &cipher);
krb5_free_principal(context, krberror.server);
if (ret)
goto bailout;
}
/* construct the reply */
ret = alloc_data(rep, 6 + ap_rep.length + cipher.length);
if (ret)
goto bailout;
ptr = rep->data;
/* length */
*ptr++ = (rep->length>>8) & 0xff;
*ptr++ = rep->length & 0xff;
/* version == 0x0001 big-endian */
*ptr++ = 0;
*ptr++ = 1;
/* ap_rep length, big-endian */
*ptr++ = (ap_rep.length>>8) & 0xff;
*ptr++ = ap_rep.length & 0xff;
/* ap-rep data */
if (ap_rep.length) {
memcpy(ptr, ap_rep.data, ap_rep.length);
ptr += ap_rep.length;
}
/* krb-priv or krb-error */
memcpy(ptr, cipher.data, cipher.length);
bailout:
krb5_auth_con_free(context, auth_context);
krb5_free_principal(context, changepw);
krb5_free_ticket(context, ticket);
free(ap_rep.data);
free(clear.data);
free(cipher.data);
krb5_free_principal(context, target);
krb5_free_unparsed_name(context, targetstr);
krb5_free_unparsed_name(context, clientstr);
krb5_free_error_message(context, errmsg);
return ret;
}
/* Dispatch routine for set/change password */
void
dispatch(void *handle, struct sockaddr *local_saddr,
const krb5_fulladdr *remote_faddr, krb5_data *request, int is_tcp,
verto_ctx *vctx, loop_respond_fn respond, void *arg)
{
krb5_error_code ret;
krb5_keytab kt = NULL;
kadm5_server_handle_t server_handle = (kadm5_server_handle_t)handle;
krb5_fulladdr local_faddr;
krb5_address **local_kaddrs = NULL, local_kaddr_buf;
krb5_data *response = NULL;
if (local_saddr == NULL) {
ret = krb5_os_localaddr(server_handle->context, &local_kaddrs);
if (ret != 0)
goto egress;
local_faddr.address = local_kaddrs[0];
local_faddr.port = 0;
} else {
local_faddr.address = &local_kaddr_buf;
init_addr(&local_faddr, local_saddr);
}
ret = krb5_kt_resolve(server_handle->context, "KDB:", &kt);
if (ret != 0) {
krb5_klog_syslog(LOG_ERR, _("chpw: Couldn't open admin keytab %s"),
krb5_get_error_message(server_handle->context, ret));
goto egress;
}
response = k5alloc(sizeof(krb5_data), &ret);
if (response == NULL)
goto egress;
ret = process_chpw_request(server_handle->context,
handle,
server_handle->params.realm,
kt,
&local_faddr,
remote_faddr,
request,
response);
egress:
if (ret)
krb5_free_data(server_handle->context, response);
krb5_free_addresses(server_handle->context, local_kaddrs);
krb5_kt_close(server_handle->context, kt);
(*respond)(arg, ret, ret == 0 ? response : NULL);
}
|
8,079,448,952,456 | /* lib/gssapi/mechglue/mglueP.h */
/*
* Copyright (c) 1995, by Sun Microsystems, Inc.
* All rights reserved.
*/
/* This header contains the private mechglue definitions. */
#ifndef _GSS_MECHGLUEP_H
#define _GSS_MECHGLUEP_H
#include "autoconf.h"
#include "mechglue.h"
#include "gssapiP_generic.h"
#define g_OID_copy(o1, o2) \
do { \
memcpy((o1)->elements, (o2)->elements, (o2)->length); \
(o1)->length = (o2)->length; \
} while (0)
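/* Note: g_OID_copy copies only the element bytes and the length; it assumes
   o1->elements already points to a buffer of at least o2->length bytes. */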
/*
* Array of context IDs typed by mechanism OID
*/
typedef struct gss_union_ctx_id_struct {
struct gss_union_ctx_id_struct *loopback;
struct gss_union_ctx_id_struct *interposer;
gss_OID mech_type;
gss_ctx_id_t internal_ctx_id;
} gss_union_ctx_id_desc, *gss_union_ctx_id_t;
/*
* Generic GSSAPI names. A name can either be a generic name, or a
* mechanism specific name....
*/
typedef struct gss_name_struct {
struct gss_name_struct *loopback;
gss_OID name_type;
gss_buffer_t external_name;
/*
* These last two fields are only filled in for mechanism
* names.
*/
gss_OID mech_type;
gss_name_t mech_name;
} gss_union_name_desc, *gss_union_name_t;
/*
* Structure for holding list of mechanism-specific name types
*/
typedef struct gss_mech_spec_name_t {
gss_OID name_type;
gss_OID mech;
struct gss_mech_spec_name_t *next, *prev;
} gss_mech_spec_name_desc, *gss_mech_spec_name;
/*
* Set of Credentials typed on mechanism OID
*/
typedef struct gss_cred_id_struct {
struct gss_cred_id_struct *loopback;
int count;
gss_OID mechs_array;
gss_cred_id_t *cred_array;
} gss_union_cred_desc, *gss_union_cred_t;
/*
* Rudimentary pointer validation macro to check whether the
* "loopback" field of an opaque struct points back to itself. This
* field also catches some programming errors where an opaque pointer
* is passed to a function expecting the address of the opaque
* pointer.
*/
#define GSSINT_CHK_LOOP(p) (!((p) != NULL && (p)->loopback == (p)))
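/* Illustrative use (not taken from this header): a union routine can reject
   a bad handle early, e.g.
   "if (GSSINT_CHK_LOOP(union_ctx)) return GSS_S_NO_CONTEXT;". */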
/********************************************************/
/* The Mechanism Dispatch Table -- a mechanism needs to */
/* define one of these and provide a function to return */
/* it to initialize the GSSAPI library */
int gssint_mechglue_initialize_library(void);
OM_uint32 gssint_get_mech_type_oid(gss_OID OID, gss_buffer_t token);
/*
* This is the definition of the mechs_array struct, which is used to
* define the mechs array table. This table is used to indirectly
* access mechanism specific versions of the gssapi routines through
* the routines in the glue module (gssd_mech_glue.c)
*
* This contains all of the functions defined in gssapi.h except for
* gss_release_buffer() and gss_release_oid_set(), which I am
* assuming, for now, to be equal across mechanisms.
*/
typedef struct gss_config {
gss_OID_desc mech_type;
void * context;
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred)
(
OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
int, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_release_cred)
(
OM_uint32*, /* minor_status */
gss_cred_id_t* /* cred_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_init_sec_context)
(
OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t, /* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_accept_sec_context)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t, /* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t* /* delegated_cred_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_process_context_token)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t /* token_buffer */
);
OM_uint32 (KRB5_CALLCONV *gss_delete_sec_context)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_buffer_t /* output_token */
);
OM_uint32 (KRB5_CALLCONV *gss_context_time)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_get_mic)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t /* message_token */
);
OM_uint32 (KRB5_CALLCONV *gss_verify_mic)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* token_buffer */
gss_qop_t* /* qop_state */
);
OM_uint32 (KRB5_CALLCONV *gss_wrap)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int*, /* conf_state */
gss_buffer_t /* output_message_buffer */
);
OM_uint32 (KRB5_CALLCONV *gss_unwrap)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int*, /* conf_state */
gss_qop_t* /* qop_state */
);
OM_uint32 (KRB5_CALLCONV *gss_display_status)
(
OM_uint32*, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type */
OM_uint32*, /* message_context */
gss_buffer_t /* status_string */
);
OM_uint32 (KRB5_CALLCONV *gss_indicate_mechs)
(
OM_uint32*, /* minor_status */
gss_OID_set* /* mech_set */
);
OM_uint32 (KRB5_CALLCONV *gss_compare_name)
(
OM_uint32*, /* minor_status */
gss_name_t, /* name1 */
gss_name_t, /* name2 */
int* /* name_equal */
);
OM_uint32 (KRB5_CALLCONV *gss_display_name)
(
OM_uint32*, /* minor_status */
gss_name_t, /* input_name */
gss_buffer_t, /* output_name_buffer */
gss_OID* /* output_name_type */
);
OM_uint32 (KRB5_CALLCONV *gss_import_name)
(
OM_uint32*, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
);
OM_uint32 (KRB5_CALLCONV *gss_release_name)
(
OM_uint32*, /* minor_status */
gss_name_t* /* input_name */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_name_t *, /* name */
OM_uint32 *, /* lifetime */
int *, /* cred_usage */
gss_OID_set * /* mechanisms */
);
OM_uint32 (KRB5_CALLCONV *gss_add_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_name_t, /* desired_name */
gss_OID, /* desired_mech */
gss_cred_usage_t, /* cred_usage */
OM_uint32, /* initiator_time_req */
OM_uint32, /* acceptor_time_req */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *, /* initiator_time_rec */
OM_uint32 * /* acceptor_time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_export_sec_context)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t /* interprocess_token */
);
OM_uint32 (KRB5_CALLCONV *gss_import_sec_context)
(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred_by_mech)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_OID, /* mech_type */
gss_name_t *, /* name */
OM_uint32 *, /* initiator_lifetime */
OM_uint32 *, /* acceptor_lifetime */
gss_cred_usage_t * /* cred_usage */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_names_for_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mechanism */
gss_OID_set * /* name_types */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_context)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_name_t *, /* src_name */
gss_name_t *, /* targ_name */
OM_uint32 *, /* lifetime_rec */
gss_OID *, /* mech_type */
OM_uint32 *, /* ctx_flags */
int *, /* locally_initiated */
int * /* open */
);
OM_uint32 (KRB5_CALLCONV *gss_internal_release_oid)
(
OM_uint32 *, /* minor_status */
gss_OID * /* OID */
);
OM_uint32 (KRB5_CALLCONV *gss_wrap_size_limit)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
OM_uint32, /* req_output_size */
OM_uint32 * /* max_input_size */
);
OM_uint32 (KRB5_CALLCONV *gss_localname)
(
OM_uint32 *, /* minor */
const gss_name_t, /* name */
gss_const_OID, /* mech_type */
gss_buffer_t /* localname */
);
OM_uint32 (KRB5_CALLCONV *gssspi_authorize_localname)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* pname */
gss_const_buffer_t, /* local user */
gss_const_OID /* local nametype */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_name)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_buffer_t /* exported_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_duplicate_name)
(
OM_uint32*, /* minor_status */
const gss_name_t, /* input_name */
gss_name_t * /* output_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_store_cred)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* input_cred */
gss_cred_usage_t, /* cred_usage */
const gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t * /* cred_usage_stored */
/* */);
/* GGF extensions */
OM_uint32 (KRB5_CALLCONV *gss_inquire_sec_context_by_oid)
(
OM_uint32 *, /* minor_status */
const gss_ctx_id_t, /* context_handle */
const gss_OID, /* OID */
gss_buffer_set_t * /* data_set */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred_by_oid)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* cred_handle */
const gss_OID, /* OID */
gss_buffer_set_t * /* data_set */
);
OM_uint32 (KRB5_CALLCONV *gss_set_sec_context_option)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
const gss_OID, /* OID */
const gss_buffer_t /* value */
);
OM_uint32 (KRB5_CALLCONV *gssspi_set_cred_option)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t *, /* cred_handle */
const gss_OID, /* OID */
const gss_buffer_t /* value */
);
OM_uint32 (KRB5_CALLCONV *gssspi_mech_invoke)
(
OM_uint32*, /* minor_status */
const gss_OID, /* mech OID */
const gss_OID, /* OID */
gss_buffer_t /* value */
);
/* AEAD extensions */
OM_uint32 (KRB5_CALLCONV *gss_wrap_aead)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* input_payload_buffer */
int *, /* conf_state */
gss_buffer_t /* output_message_buffer */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_unwrap_aead)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* output_payload_buffer */
int *, /* conf_state */
gss_qop_t * /* qop_state */
/* */);
/* SSPI extensions */
OM_uint32 (KRB5_CALLCONV *gss_wrap_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_unwrap_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int *, /* conf_state */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_wrap_iov_length)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag*/
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_complete_auth_token)
(
OM_uint32*, /* minor_status */
const gss_ctx_id_t, /* context_handle */
gss_buffer_t /* input_message_buffer */
);
/* New for 1.8 */
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred_impersonate_name)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_add_cred_impersonate_name)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
const gss_OID, /* desired_mech */
gss_cred_usage_t, /* cred_usage */
OM_uint32, /* initiator_time_req */
OM_uint32, /* acceptor_time_req */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *, /* initiator_time_rec */
OM_uint32 * /* acceptor_time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_display_name_ext)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_OID, /* display_as_name_type */
gss_buffer_t /* display_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_name)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int *, /* name_is_MN */
gss_OID *, /* MN_mech */
gss_buffer_set_t * /* attrs */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_get_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t, /* attr */
int *, /* authenticated */
int *, /* complete */
gss_buffer_t, /* value */
gss_buffer_t, /* display_value */
int * /* more */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_set_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int, /* complete */
gss_buffer_t, /* attr */
gss_buffer_t /* value */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_delete_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t /* attr */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_name_composite)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t /* exp_composite_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_map_name_to_any)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int, /* authenticated */
gss_buffer_t, /* type_id */
gss_any_t * /* output */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_release_any_name_mapping)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t, /* type_id */
gss_any_t * /* input */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_pseudo_random)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context */
int, /* prf_key */
const gss_buffer_t, /* prf_in */
ssize_t, /* desired_output_len */
gss_buffer_t /* prf_out */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_set_neg_mechs)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
const gss_OID_set /* mech_set */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_saslname_for_mech)
(
OM_uint32 *, /* minor_status */
const gss_OID, /* desired_mech */
gss_buffer_t, /* sasl_mech_name */
gss_buffer_t, /* mech_name */
gss_buffer_t /* mech_description */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_mech_for_saslname)
(
OM_uint32 *, /* minor_status */
const gss_buffer_t, /* sasl_mech_name */
gss_OID * /* mech_type */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_attrs_for_mech)
(
OM_uint32 *, /* minor_status */
gss_const_OID, /* mech */
gss_OID_set *, /* mech_attrs */
gss_OID_set * /* known_mech_attrs */
/* */);
/* Credential store extensions */
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred_from)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_const_key_value_set_t, /* cred_store */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_store_cred_into)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_cred_usage_t, /* input_usage */
gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_const_key_value_set_t, /* cred_store */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t * /* cred_usage_stored */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_acquire_cred_with_password)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* desired_name */
const gss_buffer_t, /* password */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
int, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_buffer_t /* token */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_import_cred)
(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* token */
gss_cred_id_t * /* cred_handle */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_sec_context_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* desired_mech */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_name_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mech_type */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_cred_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mech_type */
gss_buffer_t, /* token */
gss_cred_id_t * /* cred_handle */
/* */);
/* get_mic_iov extensions, added in 1.12 */
OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 (KRB5_CALLCONV *gss_verify_mic_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov_length)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
} *gss_mechanism;
/*
* In the user space we use a wrapper structure to encompass the
* mechanism entry points. The wrapper contains the mechanism
* entry points and other data which is only relevant to the gss-api
* layer. In the kernel we use only the gss_config structure because
* the kernel does not contain any of the extra gss-api specific data.
*/
typedef struct gss_mech_config {
char *kmodName; /* kernel module name */
char *uLibName; /* user library name */
char *mechNameStr; /* mechanism string name */
char *optionStr; /* optional mech parameters */
void *dl_handle; /* RTLD object handle for the mech */
gss_OID mech_type; /* mechanism oid */
gss_mechanism mech; /* mechanism initialization struct */
int priority; /* mechanism preference order */
int freeMech; /* free mech table */
int is_interposer; /* interposer mechanism flag */
gss_OID int_mech_type; /* points to the interposer OID */
gss_mechanism int_mech; /* points to the interposer mech */
struct gss_mech_config *next; /* next element in the list */
} *gss_mech_info;
/********************************************************/
/* Internal mechglue routines */
#if 0
int gssint_mechglue_init(void);
void gssint_mechglue_fini(void);
#endif
OM_uint32 gssint_select_mech_type(OM_uint32 *minor, gss_const_OID in_oid,
gss_OID *selected_oid);
gss_OID gssint_get_public_oid(gss_const_OID internal_oid);
OM_uint32 gssint_make_public_oid_set(OM_uint32 *minor_status, gss_OID oids,
int count, gss_OID_set *public_set);
gss_mechanism gssint_get_mechanism (gss_const_OID);
OM_uint32 gssint_get_mech_type (gss_OID, gss_buffer_t);
char *gssint_get_kmodName(const gss_OID);
char *gssint_get_modOptions(const gss_OID);
OM_uint32 gssint_import_internal_name (OM_uint32 *, gss_OID, gss_union_name_t,
gss_name_t *);
OM_uint32 gssint_export_internal_name(OM_uint32 *, const gss_OID,
const gss_name_t, gss_buffer_t);
OM_uint32 gssint_display_internal_name (OM_uint32 *, gss_OID, gss_name_t,
gss_buffer_t, gss_OID *);
OM_uint32 gssint_release_internal_name (OM_uint32 *, gss_OID, gss_name_t *);
OM_uint32 gssint_delete_internal_sec_context (OM_uint32 *, gss_OID,
gss_ctx_id_t *, gss_buffer_t);
#ifdef _GSS_STATIC_LINK
int gssint_register_mechinfo(gss_mech_info template);
#endif
OM_uint32 gssint_convert_name_to_union_name
(OM_uint32 *, /* minor_status */
gss_mechanism, /* mech */
gss_name_t, /* internal_name */
gss_name_t * /* external_name */
);
gss_cred_id_t gssint_get_mechanism_cred
(gss_union_cred_t, /* union_cred */
gss_OID /* mech_type */
);
OM_uint32 gssint_create_copy_buffer(
const gss_buffer_t, /* src buffer */
gss_buffer_t *, /* destination buffer */
int /* NULL terminate buffer ? */
);
OM_uint32 gssint_copy_oid_set(
OM_uint32 *, /* minor_status */
const gss_OID_set_desc * const, /* oid set */
gss_OID_set * /* new oid set */
);
gss_OID gss_find_mechanism_from_name_type (gss_OID); /* name_type */
OM_uint32 gss_add_mech_name_type
(OM_uint32 *, /* minor_status */
gss_OID, /* name_type */
gss_OID /* mech */
);
/*
* Sun extensions to GSS-API v2
*/
int
gssint_get_der_length(
unsigned char **, /* buf */
unsigned int, /* buf_len */
unsigned int * /* bytes */
);
unsigned int
gssint_der_length_size(unsigned int /* len */);
int
gssint_put_der_length(
unsigned int, /* length */
unsigned char **, /* buf */
unsigned int /* max_len */
);
OM_uint32
gssint_wrap_aead (gss_mechanism, /* mech */
OM_uint32 *, /* minor_status */
gss_union_ctx_id_t, /* ctx */
int, /* conf_req_flag */
gss_qop_t, /* qop_req_flag */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* input_payload_buffer */
int *, /* conf_state */
gss_buffer_t); /* output_message_buffer */
OM_uint32
gssint_unwrap_aead (gss_mechanism, /* mech */
OM_uint32 *, /* minor_status */
gss_union_ctx_id_t, /* ctx */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* output_payload_buffer */
int *, /* conf_state */
gss_qop_t *); /* qop_state */
/* Use this to map an error code that was returned from a mech
operation; the mech will be asked to produce the associated error
messages.
Remember that if the minor status code cannot be returned to the
caller (e.g., if it's stuffed in an automatic variable and then
ignored), then we don't care about producing a mapping. */
#define map_error(MINORP, MECH) \
(*(MINORP) = gssint_mecherrmap_map(*(MINORP), &(MECH)->mech_type))
#define map_error_oid(MINORP, MECHOID) \
(*(MINORP) = gssint_mecherrmap_map(*(MINORP), (MECHOID)))
/* Use this to map an errno value or com_err error code being
generated within the mechglue code (e.g., by calling generic oid
ops). Any errno or com_err values produced by mech operations
should be processed with map_error. This means they'll be stored
separately even if the mech uses com_err, because we can't assume
that it will use com_err. */
#define map_errcode(MINORP) \
(*(MINORP) = gssint_mecherrmap_map_errcode(*(MINORP)))
#endif /* _GSS_MECHGLUEP_H */
| /* lib/gssapi/mechglue/mglueP.h */
/*
* Copyright (c) 1995, by Sun Microsystems, Inc.
* All rights reserved.
*/
/* This header contains the private mechglue definitions. */
#ifndef _GSS_MECHGLUEP_H
#define _GSS_MECHGLUEP_H
#include "autoconf.h"
#include "mechglue.h"
#include "gssapiP_generic.h"
#define g_OID_copy(o1, o2) \
do { \
memcpy((o1)->elements, (o2)->elements, (o2)->length); \
(o1)->length = (o2)->length; \
} while (0)
/*
* Array of context IDs typed by mechanism OID
*/
typedef struct gss_union_ctx_id_struct {
struct gss_union_ctx_id_struct *loopback;
gss_OID mech_type;
gss_ctx_id_t internal_ctx_id;
} gss_union_ctx_id_desc, *gss_union_ctx_id_t;
/*
* Generic GSSAPI names. A name can either be a generic name, or a
* mechanism specific name....
*/
typedef struct gss_name_struct {
struct gss_name_struct *loopback;
gss_OID name_type;
gss_buffer_t external_name;
/*
* These last two fields are only filled in for mechanism
* names.
*/
gss_OID mech_type;
gss_name_t mech_name;
} gss_union_name_desc, *gss_union_name_t;
/*
* Structure for holding list of mechanism-specific name types
*/
typedef struct gss_mech_spec_name_t {
gss_OID name_type;
gss_OID mech;
struct gss_mech_spec_name_t *next, *prev;
} gss_mech_spec_name_desc, *gss_mech_spec_name;
/*
* Set of Credentials typed on mechanism OID
*/
typedef struct gss_cred_id_struct {
struct gss_cred_id_struct *loopback;
int count;
gss_OID mechs_array;
gss_cred_id_t *cred_array;
} gss_union_cred_desc, *gss_union_cred_t;
/*
* Rudimentary pointer validation macro to check whether the
* "loopback" field of an opaque struct points back to itself. This
* field also catches some programming errors where an opaque pointer
* is passed to a function expecting the address of the opaque
* pointer.
*/
#define GSSINT_CHK_LOOP(p) (!((p) != NULL && (p)->loopback == (p)))
/********************************************************/
/* The Mechanism Dispatch Table -- a mechanism needs to */
/* define one of these and provide a function to return */
/* it to initialize the GSSAPI library */
int gssint_mechglue_initialize_library(void);
OM_uint32 gssint_get_mech_type_oid(gss_OID OID, gss_buffer_t token);
/*
* This is the definition of the mechs_array struct, which is used to
* define the mechs array table. This table is used to indirectly
* access mechanism specific versions of the gssapi routines through
* the routines in the glue module (gssd_mech_glue.c)
*
* This contains all of the functions defined in gssapi.h except for
* gss_release_buffer() and gss_release_oid_set(), which I am
* assuming, for now, to be equal across mechanisms.
*/
typedef struct gss_config {
gss_OID_desc mech_type;
void * context;
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred)
(
OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
int, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_release_cred)
(
OM_uint32*, /* minor_status */
gss_cred_id_t* /* cred_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_init_sec_context)
(
OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t, /* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_accept_sec_context)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t, /* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t* /* delegated_cred_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_process_context_token)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t /* token_buffer */
);
OM_uint32 (KRB5_CALLCONV *gss_delete_sec_context)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_buffer_t /* output_token */
);
OM_uint32 (KRB5_CALLCONV *gss_context_time)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
OM_uint32* /* time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_get_mic)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t /* message_token */
);
OM_uint32 (KRB5_CALLCONV *gss_verify_mic)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* token_buffer */
gss_qop_t* /* qop_state */
);
OM_uint32 (KRB5_CALLCONV *gss_wrap)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int*, /* conf_state */
gss_buffer_t /* output_message_buffer */
);
OM_uint32 (KRB5_CALLCONV *gss_unwrap)
(
OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int*, /* conf_state */
gss_qop_t* /* qop_state */
);
OM_uint32 (KRB5_CALLCONV *gss_display_status)
(
OM_uint32*, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type */
OM_uint32*, /* message_context */
gss_buffer_t /* status_string */
);
OM_uint32 (KRB5_CALLCONV *gss_indicate_mechs)
(
OM_uint32*, /* minor_status */
gss_OID_set* /* mech_set */
);
OM_uint32 (KRB5_CALLCONV *gss_compare_name)
(
OM_uint32*, /* minor_status */
gss_name_t, /* name1 */
gss_name_t, /* name2 */
int* /* name_equal */
);
OM_uint32 (KRB5_CALLCONV *gss_display_name)
(
OM_uint32*, /* minor_status */
gss_name_t, /* input_name */
gss_buffer_t, /* output_name_buffer */
gss_OID* /* output_name_type */
);
OM_uint32 (KRB5_CALLCONV *gss_import_name)
(
OM_uint32*, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
);
OM_uint32 (KRB5_CALLCONV *gss_release_name)
(
OM_uint32*, /* minor_status */
gss_name_t* /* input_name */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_name_t *, /* name */
OM_uint32 *, /* lifetime */
int *, /* cred_usage */
gss_OID_set * /* mechanisms */
);
OM_uint32 (KRB5_CALLCONV *gss_add_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_name_t, /* desired_name */
gss_OID, /* desired_mech */
gss_cred_usage_t, /* cred_usage */
OM_uint32, /* initiator_time_req */
OM_uint32, /* acceptor_time_req */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *, /* initiator_time_rec */
OM_uint32 * /* acceptor_time_rec */
);
OM_uint32 (KRB5_CALLCONV *gss_export_sec_context)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t /* interprocess_token */
);
OM_uint32 (KRB5_CALLCONV *gss_import_sec_context)
(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred_by_mech)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_OID, /* mech_type */
gss_name_t *, /* name */
OM_uint32 *, /* initiator_lifetime */
OM_uint32 *, /* acceptor_lifetime */
gss_cred_usage_t * /* cred_usage */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_names_for_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mechanism */
gss_OID_set * /* name_types */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_context)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_name_t *, /* src_name */
gss_name_t *, /* targ_name */
OM_uint32 *, /* lifetime_rec */
gss_OID *, /* mech_type */
OM_uint32 *, /* ctx_flags */
int *, /* locally_initiated */
int * /* open */
);
OM_uint32 (KRB5_CALLCONV *gss_internal_release_oid)
(
OM_uint32 *, /* minor_status */
gss_OID * /* OID */
);
OM_uint32 (KRB5_CALLCONV *gss_wrap_size_limit)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
OM_uint32, /* req_output_size */
OM_uint32 * /* max_input_size */
);
OM_uint32 (KRB5_CALLCONV *gss_localname)
(
OM_uint32 *, /* minor */
const gss_name_t, /* name */
gss_const_OID, /* mech_type */
gss_buffer_t /* localname */
);
OM_uint32 (KRB5_CALLCONV *gssspi_authorize_localname)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* pname */
gss_const_buffer_t, /* local user */
gss_const_OID /* local nametype */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_name)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_buffer_t /* exported_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_duplicate_name)
(
OM_uint32*, /* minor_status */
const gss_name_t, /* input_name */
gss_name_t * /* output_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_store_cred)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* input_cred */
gss_cred_usage_t, /* cred_usage */
const gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t * /* cred_usage_stored */
/* */);
/* GGF extensions */
OM_uint32 (KRB5_CALLCONV *gss_inquire_sec_context_by_oid)
(
OM_uint32 *, /* minor_status */
const gss_ctx_id_t, /* context_handle */
const gss_OID, /* OID */
gss_buffer_set_t * /* data_set */
);
OM_uint32 (KRB5_CALLCONV *gss_inquire_cred_by_oid)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* cred_handle */
const gss_OID, /* OID */
gss_buffer_set_t * /* data_set */
);
OM_uint32 (KRB5_CALLCONV *gss_set_sec_context_option)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
const gss_OID, /* OID */
const gss_buffer_t /* value */
);
OM_uint32 (KRB5_CALLCONV *gssspi_set_cred_option)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t *, /* cred_handle */
const gss_OID, /* OID */
const gss_buffer_t /* value */
);
OM_uint32 (KRB5_CALLCONV *gssspi_mech_invoke)
(
OM_uint32*, /* minor_status */
const gss_OID, /* mech OID */
const gss_OID, /* OID */
gss_buffer_t /* value */
);
/* AEAD extensions */
OM_uint32 (KRB5_CALLCONV *gss_wrap_aead)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* input_payload_buffer */
int *, /* conf_state */
gss_buffer_t /* output_message_buffer */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_unwrap_aead)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* output_payload_buffer */
int *, /* conf_state */
gss_qop_t * /* qop_state */
/* */);
/* SSPI extensions */
OM_uint32 (KRB5_CALLCONV *gss_wrap_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_unwrap_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int *, /* conf_state */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_wrap_iov_length)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag*/
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_complete_auth_token)
(
OM_uint32*, /* minor_status */
const gss_ctx_id_t, /* context_handle */
gss_buffer_t /* input_message_buffer */
);
/* New for 1.8 */
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred_impersonate_name)
(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_add_cred_impersonate_name)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
const gss_OID, /* desired_mech */
gss_cred_usage_t, /* cred_usage */
OM_uint32, /* initiator_time_req */
OM_uint32, /* acceptor_time_req */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *, /* initiator_time_rec */
OM_uint32 * /* acceptor_time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_display_name_ext)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_OID, /* display_as_name_type */
gss_buffer_t /* display_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_name)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int *, /* name_is_MN */
gss_OID *, /* MN_mech */
gss_buffer_set_t * /* attrs */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_get_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t, /* attr */
int *, /* authenticated */
int *, /* complete */
gss_buffer_t, /* value */
gss_buffer_t, /* display_value */
int * /* more */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_set_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int, /* complete */
gss_buffer_t, /* attr */
gss_buffer_t /* value */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_delete_name_attribute)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t /* attr */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_name_composite)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t /* exp_composite_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_map_name_to_any)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
int, /* authenticated */
gss_buffer_t, /* type_id */
gss_any_t * /* output */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_release_any_name_mapping)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* name */
gss_buffer_t, /* type_id */
gss_any_t * /* input */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_pseudo_random)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context */
int, /* prf_key */
const gss_buffer_t, /* prf_in */
ssize_t, /* desired_output_len */
gss_buffer_t /* prf_out */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_set_neg_mechs)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
const gss_OID_set /* mech_set */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_saslname_for_mech)
(
OM_uint32 *, /* minor_status */
const gss_OID, /* desired_mech */
gss_buffer_t, /* sasl_mech_name */
gss_buffer_t, /* mech_name */
gss_buffer_t /* mech_description */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_mech_for_saslname)
(
OM_uint32 *, /* minor_status */
const gss_buffer_t, /* sasl_mech_name */
gss_OID * /* mech_type */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_inquire_attrs_for_mech)
(
OM_uint32 *, /* minor_status */
gss_const_OID, /* mech */
gss_OID_set *, /* mech_attrs */
gss_OID_set * /* known_mech_attrs */
/* */);
/* Credential store extensions */
OM_uint32 (KRB5_CALLCONV *gss_acquire_cred_from)
(
OM_uint32 *, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_const_key_value_set_t, /* cred_store */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_store_cred_into)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_cred_usage_t, /* input_usage */
gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_const_key_value_set_t, /* cred_store */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t * /* cred_usage_stored */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_acquire_cred_with_password)
(
OM_uint32 *, /* minor_status */
const gss_name_t, /* desired_name */
const gss_buffer_t, /* password */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
int, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 * /* time_rec */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_export_cred)
(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_buffer_t /* token */
/* */);
OM_uint32 (KRB5_CALLCONV *gss_import_cred)
(
OM_uint32 *, /* minor_status */
gss_buffer_t, /* token */
gss_cred_id_t * /* cred_handle */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_sec_context_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* desired_mech */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_name_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mech_type */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
/* */);
OM_uint32 (KRB5_CALLCONV *gssspi_import_cred_by_mech)
(
OM_uint32 *, /* minor_status */
gss_OID, /* mech_type */
gss_buffer_t, /* token */
gss_cred_id_t * /* cred_handle */
/* */);
/* get_mic_iov extensions, added in 1.12 */
OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 (KRB5_CALLCONV *gss_verify_mic_iov)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov_length)
(
OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
} *gss_mechanism;
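/*
 * Illustrative sketch (not part of the original header): a mechanism
 * typically defines one static struct gss_config with its entry points
 * filled in and hands it back from an initialization hook.  The names,
 * the partial initializer, and the hook signature below are
 * hypothetical.
 */
#if 0
static struct gss_config example_mech = {
    { 9, "\052\206\110\206\367\022\001\002\002" }, /* mech_type OID (krb5) */
    NULL, /* context */
    example_gss_acquire_cred,
    example_gss_release_cred,
    example_gss_init_sec_context,
    example_gss_accept_sec_context,
    /* ... remaining entry points, NULL where not supported ... */
};
gss_mechanism
example_mech_initialize(void)
{
    return &example_mech;
}
#endif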
/*
* In the user space we use a wrapper structure to encompass the
* mechanism entry points. The wrapper contains the mechanism
* entry points and other data which is only relevant to the gss-api
* layer. In the kernel we use only the gss_config structure because
* the kernel does not contain any of the extra gss-api specific data.
*/
typedef struct gss_mech_config {
char *kmodName; /* kernel module name */
char *uLibName; /* user library name */
char *mechNameStr; /* mechanism string name */
char *optionStr; /* optional mech parameters */
void *dl_handle; /* RTLD object handle for the mech */
gss_OID mech_type; /* mechanism oid */
gss_mechanism mech; /* mechanism initialization struct */
int priority; /* mechanism preference order */
int freeMech; /* free mech table */
int is_interposer; /* interposer mechanism flag */
gss_OID int_mech_type; /* points to the interposer OID */
gss_mechanism int_mech; /* points to the interposer mech */
struct gss_mech_config *next; /* next element in the list */
} *gss_mech_info;
/********************************************************/
/* Internal mechglue routines */
#if 0
int gssint_mechglue_init(void);
void gssint_mechglue_fini(void);
#endif
OM_uint32 gssint_select_mech_type(OM_uint32 *minor, gss_const_OID in_oid,
gss_OID *selected_oid);
gss_OID gssint_get_public_oid(gss_const_OID internal_oid);
OM_uint32 gssint_make_public_oid_set(OM_uint32 *minor_status, gss_OID oids,
int count, gss_OID_set *public_set);
gss_mechanism gssint_get_mechanism (gss_const_OID);
OM_uint32 gssint_get_mech_type (gss_OID, gss_buffer_t);
char *gssint_get_kmodName(const gss_OID);
char *gssint_get_modOptions(const gss_OID);
OM_uint32 gssint_import_internal_name (OM_uint32 *, gss_OID, gss_union_name_t,
gss_name_t *);
OM_uint32 gssint_export_internal_name(OM_uint32 *, const gss_OID,
const gss_name_t, gss_buffer_t);
OM_uint32 gssint_display_internal_name (OM_uint32 *, gss_OID, gss_name_t,
gss_buffer_t, gss_OID *);
OM_uint32 gssint_release_internal_name (OM_uint32 *, gss_OID, gss_name_t *);
OM_uint32 gssint_delete_internal_sec_context (OM_uint32 *, gss_OID,
gss_ctx_id_t *, gss_buffer_t);
#ifdef _GSS_STATIC_LINK
int gssint_register_mechinfo(gss_mech_info template);
#endif
OM_uint32 gssint_convert_name_to_union_name
(OM_uint32 *, /* minor_status */
gss_mechanism, /* mech */
gss_name_t, /* internal_name */
gss_name_t * /* external_name */
);
gss_cred_id_t gssint_get_mechanism_cred
(gss_union_cred_t, /* union_cred */
gss_OID /* mech_type */
);
OM_uint32 gssint_create_copy_buffer(
const gss_buffer_t, /* src buffer */
gss_buffer_t *, /* destination buffer */
int /* NULL terminate buffer ? */
);
OM_uint32 gssint_copy_oid_set(
OM_uint32 *, /* minor_status */
const gss_OID_set_desc * const, /* oid set */
gss_OID_set * /* new oid set */
);
gss_OID gss_find_mechanism_from_name_type (gss_OID); /* name_type */
OM_uint32 gss_add_mech_name_type
(OM_uint32 *, /* minor_status */
gss_OID, /* name_type */
gss_OID /* mech */
);
/*
* Sun extensions to GSS-API v2
*/
int
gssint_get_der_length(
unsigned char **, /* buf */
unsigned int, /* buf_len */
unsigned int * /* bytes */
);
unsigned int
gssint_der_length_size(unsigned int /* len */);
int
gssint_put_der_length(
unsigned int, /* length */
unsigned char **, /* buf */
unsigned int /* max_len */
);
OM_uint32
gssint_wrap_aead (gss_mechanism, /* mech */
OM_uint32 *, /* minor_status */
gss_union_ctx_id_t, /* ctx */
int, /* conf_req_flag */
gss_qop_t, /* qop_req_flag */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* input_payload_buffer */
int *, /* conf_state */
gss_buffer_t); /* output_message_buffer */
OM_uint32
gssint_unwrap_aead (gss_mechanism, /* mech */
OM_uint32 *, /* minor_status */
gss_union_ctx_id_t, /* ctx */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* input_assoc_buffer */
gss_buffer_t, /* output_payload_buffer */
int *, /* conf_state */
gss_qop_t *); /* qop_state */
/* Use this to map an error code that was returned from a mech
operation; the mech will be asked to produce the associated error
messages.
Remember that if the minor status code cannot be returned to the
caller (e.g., if it's stuffed in an automatic variable and then
ignored), then we don't care about producing a mapping. */
#define map_error(MINORP, MECH) \
(*(MINORP) = gssint_mecherrmap_map(*(MINORP), &(MECH)->mech_type))
#define map_error_oid(MINORP, MECHOID) \
(*(MINORP) = gssint_mecherrmap_map(*(MINORP), (MECHOID)))
/* Use this to map an errno value or com_err error code being
generated within the mechglue code (e.g., by calling generic oid
ops). Any errno or com_err values produced by mech operations
should be processed with map_error. This means they'll be stored
separately even if the mech uses com_err, because we can't assume
that it will use com_err. */
#define map_errcode(MINORP) \
(*(MINORP) = gssint_mecherrmap_map_errcode(*(MINORP)))
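/*
 * Illustrative usage sketch (not part of the original header): a glue
 * routine maps a mechanism's minor status with map_error() before
 * returning it, and uses map_errcode() for errno/com_err values the
 * glue layer itself generates.  The variable names below are
 * hypothetical.
 */
#if 0
major = mech->gss_context_time(&minor, internal_ctx, time_rec);
if (GSS_ERROR(major))
    map_error(&minor, mech); /* record the mech-specific minor code */
if ((buf.value = malloc(len)) == NULL) {
    *minor_status = ENOMEM;
    map_errcode(minor_status); /* errno generated inside the glue layer */
    return (GSS_S_FAILURE);
}
#endif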
#endif /* _GSS_MECHGLUEP_H */
|
55,746,502,349,949 | /* lib/rpc/svc_auth_gss.c */
/*
Copyright (c) 2000 The Regents of the University of Michigan.
All rights reserved.
Copyright (c) 2000 Dug Song <dugsong@UMICH.EDU>.
All rights reserved, all wrongs reversed.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Id: svc_auth_gss.c,v 1.28 2002/10/15 21:29:36 kwc Exp
*/
#include "k5-platform.h"
#include <gssrpc/rpc.h>
#include <gssrpc/auth_gssapi.h>
#ifdef HAVE_HEIMDAL
#include <gssapi.h>
#define gss_nt_service_name GSS_C_NT_HOSTBASED_SERVICE
#else
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#endif
#ifdef DEBUG_GSSAPI
int svc_debug_gss = DEBUG_GSSAPI;
#endif
#ifdef SPKM
#ifndef OID_EQ
#define g_OID_equal(o1,o2) \
(((o1)->length == (o2)->length) && \
((o1)->elements != 0) && ((o2)->elements != 0) && \
(memcmp((o1)->elements,(o2)->elements,(int) (o1)->length) == 0))
#define OID_EQ 1
#endif /* OID_EQ */
extern const gss_OID_desc * const gss_mech_spkm3;
#endif /* SPKM */
extern SVCAUTH svc_auth_none;
/*
* from mit-krb5-1.2.1 mechglue/mglueP.h:
* Array of context IDs typed by mechanism OID
*/
typedef struct gss_union_ctx_id_t {
gss_OID mech_type;
gss_ctx_id_t internal_ctx_id;
} gss_union_ctx_id_desc, *gss_union_ctx_id_t;
static auth_gssapi_log_badauth_func log_badauth = NULL;
static caddr_t log_badauth_data = NULL;
static auth_gssapi_log_badauth2_func log_badauth2 = NULL;
static caddr_t log_badauth2_data = NULL;
static auth_gssapi_log_badverf_func log_badverf = NULL;
static caddr_t log_badverf_data = NULL;
static auth_gssapi_log_miscerr_func log_miscerr = NULL;
static caddr_t log_miscerr_data = NULL;
#define LOG_MISCERR(arg) if (log_miscerr) \
(*log_miscerr)(rqst, msg, arg, log_miscerr_data)
static bool_t svcauth_gss_destroy(SVCAUTH *);
static bool_t svcauth_gss_wrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_unwrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_nextverf(struct svc_req *, u_int);
struct svc_auth_ops svc_auth_gss_ops = {
svcauth_gss_wrap,
svcauth_gss_unwrap,
svcauth_gss_destroy
};
struct svc_rpc_gss_data {
bool_t established; /* context established */
gss_ctx_id_t ctx; /* context id */
struct rpc_gss_sec sec; /* security triple */
gss_buffer_desc cname; /* GSS client name */
u_int seq; /* sequence number */
u_int win; /* sequence window */
u_int seqlast; /* last sequence number */
uint32_t seqmask; /* bitmask of seqnums */
gss_name_t client_name; /* unparsed name string */
gss_buffer_desc checksum; /* so we can free it */
};
#define SVCAUTH_PRIVATE(auth) \
(*(struct svc_rpc_gss_data **)&(auth)->svc_ah_private)
/* Global server credentials. */
gss_cred_id_t svcauth_gss_creds;
static gss_name_t svcauth_gss_name = NULL;
bool_t
svcauth_gss_set_svc_name(gss_name_t name)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_set_svc_name()");
if (svcauth_gss_name != NULL) {
maj_stat = gss_release_name(&min_stat, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_name", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_name = NULL;
}
if (svcauth_gss_name == GSS_C_NO_NAME)
return (TRUE);
maj_stat = gss_duplicate_name(&min_stat, name, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_duplicate_name", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_acquire_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_acquire_cred()");
maj_stat = gss_acquire_cred(&min_stat, svcauth_gss_name, 0,
GSS_C_NULL_OID_SET, GSS_C_ACCEPT,
&svcauth_gss_creds, NULL, NULL);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_acquire_cred", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_release_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_release_cred()");
maj_stat = gss_release_cred(&min_stat, &svcauth_gss_creds);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_cred", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_creds = NULL;
return (TRUE);
}
/* Invoke log_badauth callbacks for an authentication failure. */
static void
badauth(OM_uint32 maj, OM_uint32 minor, SVCXPRT *xprt)
{
if (log_badauth != NULL)
(*log_badauth)(maj, minor, &xprt->xp_raddr, log_badauth_data);
if (log_badauth2 != NULL)
(*log_badauth2)(maj, minor, xprt, log_badauth2_data);
}
static bool_t
svcauth_gss_accept_sec_context(struct svc_req *rqst,
struct rpc_gss_init_res *gr)
{
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
gss_buffer_desc recv_tok, seqbuf;
gss_OID mech;
OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;
log_debug("in svcauth_gss_accept_context()");
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gr, 0, sizeof(*gr));
/* Deserialize arguments. */
memset(&recv_tok, 0, sizeof(recv_tok));
if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args,
(caddr_t)&recv_tok))
return (FALSE);
gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
&gd->ctx,
svcauth_gss_creds,
&recv_tok,
GSS_C_NO_CHANNEL_BINDINGS,
&gd->client_name,
&mech,
&gr->gr_token,
&ret_flags,
NULL,
NULL);
svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok);
log_status("accept_sec_context", gr->gr_major, gr->gr_minor);
if (gr->gr_major != GSS_S_COMPLETE &&
gr->gr_major != GSS_S_CONTINUE_NEEDED) {
badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt);
gd->ctx = GSS_C_NO_CONTEXT;
goto errout;
}
/*
* ANDROS: krb5 mechglue returns ctx of size 8 - two pointers,
* one to the mechanism oid, one to the internal_ctx_id
*/
if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) {
fprintf(stderr, "svcauth_gss_accept_context: out of memory\n");
goto errout;
}
memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc));
gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc);
/* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */
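/* Advertise a sequence window equal to the bit width of gd->seqmask (32 with a uint32_t mask). */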
gr->gr_win = sizeof(gd->seqmask) * 8;
/* Save client info. */
gd->sec.mech = mech;
gd->sec.qop = GSS_C_QOP_DEFAULT;
gd->sec.svc = gc->gc_svc;
gd->seq = gc->gc_seq;
gd->win = gr->gr_win;
if (gr->gr_major == GSS_S_COMPLETE) {
#ifdef SPKM
/* spkm3: no src_name (anonymous) */
if(!g_OID_equal(gss_mech_spkm3, mech)) {
#endif
maj_stat = gss_display_name(&min_stat, gd->client_name,
&gd->cname, &gd->sec.mech);
#ifdef SPKM
}
#endif
if (maj_stat != GSS_S_COMPLETE) {
log_status("display_name", maj_stat, min_stat);
goto errout;
}
#ifdef DEBUG
#ifdef HAVE_HEIMDAL
log_debug("accepted context for %.*s with "
"<mech {}, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
gd->sec.qop, gd->sec.svc);
#else
{
gss_buffer_desc mechname;
gss_oid_to_str(&min_stat, mech, &mechname);
log_debug("accepted context for %.*s with "
"<mech %.*s, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
mechname.length, (char *)mechname.value,
gd->sec.qop, gd->sec.svc);
gss_release_buffer(&min_stat, &mechname);
}
#endif
#endif /* DEBUG */
seq = htonl(gr->gr_win);
seqbuf.value = &seq;
seqbuf.length = sizeof(seq);
gss_release_buffer(&min_stat, &gd->checksum);
maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
&seqbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
goto errout;
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
}
return (TRUE);
errout:
gss_release_buffer(&min_stat, &gr->gr_token);
return (FALSE);
}
static bool_t
svcauth_gss_validate(struct svc_req *rqst, struct svc_rpc_gss_data *gd, struct rpc_msg *msg)
{
struct opaque_auth *oa;
gss_buffer_desc rpcbuf, checksum;
OM_uint32 maj_stat, min_stat, qop_state;
u_char rpchdr[128];
int32_t *buf;
log_debug("in svcauth_gss_validate()");
memset(rpchdr, 0, sizeof(rpchdr));
/* XXX - Reconstruct RPC header for signing (from xdr_callmsg). */
oa = &msg->rm_call.cb_cred;
if (oa->oa_length > MAX_AUTH_BYTES)
return (FALSE);
/* 8 XDR units from the IXDR macro calls. */
if (sizeof(rpchdr) < (8 * BYTES_PER_XDR_UNIT +
RNDUP(oa->oa_length)))
return (FALSE);
buf = (int32_t *)(void *)rpchdr;
IXDR_PUT_LONG(buf, msg->rm_xid);
IXDR_PUT_ENUM(buf, msg->rm_direction);
IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_prog);
IXDR_PUT_LONG(buf, msg->rm_call.cb_vers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_proc);
IXDR_PUT_ENUM(buf, oa->oa_flavor);
IXDR_PUT_LONG(buf, oa->oa_length);
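/* Append the credential body itself, padded out to a 4-byte XDR boundary. */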
if (oa->oa_length) {
memcpy((caddr_t)buf, oa->oa_base, oa->oa_length);
buf += RNDUP(oa->oa_length) / sizeof(int32_t);
}
rpcbuf.value = rpchdr;
rpcbuf.length = (u_char *)buf - rpchdr;
checksum.value = msg->rm_call.cb_verf.oa_base;
checksum.length = msg->rm_call.cb_verf.oa_length;
maj_stat = gss_verify_mic(&min_stat, gd->ctx, &rpcbuf, &checksum,
&qop_state);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_verify_mic", maj_stat, min_stat);
if (log_badverf != NULL)
(*log_badverf)(gd->client_name,
svcauth_gss_name,
rqst, msg, log_badverf_data);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_nextverf(struct svc_req *rqst, u_int num)
{
struct svc_rpc_gss_data *gd;
gss_buffer_desc signbuf;
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_nextverf()");
if (rqst->rq_xprt->xp_auth == NULL)
return (FALSE);
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gss_release_buffer(&min_stat, &gd->checksum);
signbuf.value = &num;
signbuf.length = sizeof(num);
maj_stat = gss_get_mic(&min_stat, gd->ctx, gd->sec.qop,
&signbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_get_mic", maj_stat, min_stat);
return (FALSE);
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = (caddr_t)gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = (u_int)gd->checksum.length;
return (TRUE);
}
enum auth_stat
gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg,
bool_t *no_dispatch)
{
enum auth_stat retstat;
XDR xdrs;
SVCAUTH *auth;
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
struct rpc_gss_init_res gr;
int call_stat, offset;
OM_uint32 min_stat;
log_debug("in svcauth_gss()");
/* Initialize reply. */
rqst->rq_xprt->xp_verf = gssrpc__null_auth;
/* Allocate and set up server auth handle. */
if (rqst->rq_xprt->xp_auth == NULL ||
rqst->rq_xprt->xp_auth == &svc_auth_none) {
if ((auth = calloc(sizeof(*auth), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
if ((gd = calloc(sizeof(*gd), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
auth->svc_ah_ops = &svc_auth_gss_ops;
SVCAUTH_PRIVATE(auth) = gd;
rqst->rq_xprt->xp_auth = auth;
}
else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd);
/* Deserialize client credentials. */
if (rqst->rq_cred.oa_length <= 0)
return (AUTH_BADCRED);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gc, 0, sizeof(*gc));
log_debug("calling xdrmem_create()");
log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length);
xdrmem_create(&xdrs, rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length, XDR_DECODE);
log_debug("xdrmem_create() returned");
if (!xdr_rpc_gss_cred(&xdrs, gc)) {
log_debug("xdr_rpc_gss_cred() failed");
XDR_DESTROY(&xdrs);
return (AUTH_BADCRED);
}
XDR_DESTROY(&xdrs);
retstat = AUTH_FAILED;
#define ret_freegc(code) do { retstat = code; goto freegc; } while (0)
/* Check version. */
if (gc->gc_v != RPCSEC_GSS_VERSION)
ret_freegc (AUTH_BADCRED);
/* Check RPCSEC_GSS service. */
if (gc->gc_svc != RPCSEC_GSS_SVC_NONE &&
gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY &&
gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY)
ret_freegc (AUTH_BADCRED);
/* Check sequence number. */
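/*
 * Descriptive note: gd->seqmask is a sliding window over the last
 * gd->win sequence numbers, anchored at gd->seqlast.  A newer sequence
 * number shifts the window forward; an older one is accepted only if it
 * still falls inside the window and its bit is not already set.  For
 * example, with seqlast = 100 and win = 32, seq 101 advances the
 * window, seq 90 sets bit 10, and seq 60 or a repeat of 90 is rejected
 * as RPCSEC_GSS_CTXPROBLEM.
 */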
if (gd->established) {
if (gc->gc_seq > MAXSEQ)
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
if ((offset = gd->seqlast - gc->gc_seq) < 0) {
gd->seqlast = gc->gc_seq;
offset = 0 - offset;
gd->seqmask <<= offset;
offset = 0;
} else if ((u_int)offset >= gd->win ||
(gd->seqmask & (1 << offset))) {
*no_dispatch = 1;
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
}
gd->seq = gc->gc_seq;
gd->seqmask |= (1 << offset);
}
if (gd->established) {
rqst->rq_clntname = (char *)gd->client_name;
rqst->rq_svccred = (char *)gd->ctx;
}
/* Handle RPCSEC_GSS control procedure. */
switch (gc->gc_proc) {
case RPCSEC_GSS_INIT:
case RPCSEC_GSS_CONTINUE_INIT:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_acquire_cred())
ret_freegc (AUTH_FAILED);
if (!svcauth_gss_accept_sec_context(rqst, &gr))
ret_freegc (AUTH_REJECTEDCRED);
if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) {
gss_release_buffer(&min_stat, &gr.gr_token);
mem_free(gr.gr_ctx.value,
sizeof(gss_union_ctx_id_desc));
ret_freegc (AUTH_FAILED);
}
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res,
(caddr_t)&gr);
gss_release_buffer(&min_stat, &gr.gr_token);
gss_release_buffer(&min_stat, &gd->checksum);
mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc));
if (!call_stat)
ret_freegc (AUTH_FAILED);
if (gr.gr_major == GSS_S_COMPLETE)
gd->established = TRUE;
break;
case RPCSEC_GSS_DATA:
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
break;
case RPCSEC_GSS_DESTROY:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt,
xdr_void, (caddr_t)NULL);
log_debug("sendreply in destroy: %d", call_stat);
if (!svcauth_gss_release_cred())
ret_freegc (AUTH_FAILED);
SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth);
rqst->rq_xprt->xp_auth = &svc_auth_none;
break;
default:
ret_freegc (AUTH_REJECTEDCRED);
break;
}
retstat = AUTH_OK;
freegc:
xdr_free(xdr_rpc_gss_cred, gc);
log_debug("returning %d from svcauth_gss()", retstat);
return (retstat);
}
static bool_t
svcauth_gss_destroy(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
OM_uint32 min_stat;
log_debug("in svcauth_gss_destroy()");
gd = SVCAUTH_PRIVATE(auth);
gss_delete_sec_context(&min_stat, &gd->ctx, GSS_C_NO_BUFFER);
gss_release_buffer(&min_stat, &gd->cname);
gss_release_buffer(&min_stat, &gd->checksum);
if (gd->client_name)
gss_release_name(&min_stat, &gd->client_name);
mem_free(gd, sizeof(*gd));
mem_free(auth, sizeof(*auth));
return (TRUE);
}
static bool_t
svcauth_gss_wrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_wrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
static bool_t
svcauth_gss_unwrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_unwrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
char *
svcauth_gss_get_principal(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
char *pname;
gd = SVCAUTH_PRIVATE(auth);
if (gd->cname.length == 0 || gd->cname.length >= SIZE_MAX)
return (NULL);
if ((pname = malloc(gd->cname.length + 1)) == NULL)
return (NULL);
memcpy(pname, gd->cname.value, gd->cname.length);
pname[gd->cname.length] = '\0';
return (pname);
}
/*
* Function: svcauth_gss_set_log_badauth_func
*
* Purpose: sets the logging function called when an invalid RPC call
* arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badauth_func(
auth_gssapi_log_badauth_func func,
caddr_t data)
{
log_badauth = func;
log_badauth_data = data;
}
void
svcauth_gss_set_log_badauth2_func(auth_gssapi_log_badauth2_func func,
caddr_t data)
{
log_badauth2 = func;
log_badauth2_data = data;
}
/*
* Function: svcauth_gss_set_log_badverf_func
*
* Purpose: sets the logging function called when an invalid RPC call
* arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badverf_func(
auth_gssapi_log_badverf_func func,
caddr_t data)
{
log_badverf = func;
log_badverf_data = data;
}
/*
* Function: svcauth_gss_set_log_miscerr_func
*
* Purpose: sets the logging function called when a miscellaneous
* AUTH_GSSAPI error occurs
*
* See functional specifications.
*/
void svcauth_gss_set_log_miscerr_func(
auth_gssapi_log_miscerr_func func,
caddr_t data)
{
log_miscerr = func;
log_miscerr_data = data;
}
| /* lib/rpc/svc_auth_gss.c */
/*
Copyright (c) 2000 The Regents of the University of Michigan.
All rights reserved.
Copyright (c) 2000 Dug Song <dugsong@UMICH.EDU>.
All rights reserved, all wrongs reversed.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Id: svc_auth_gss.c,v 1.28 2002/10/15 21:29:36 kwc Exp
*/
#include "k5-platform.h"
#include <gssrpc/rpc.h>
#include <gssrpc/auth_gssapi.h>
#ifdef HAVE_HEIMDAL
#include <gssapi.h>
#define gss_nt_service_name GSS_C_NT_HOSTBASED_SERVICE
#else
#include <gssapi/gssapi.h>
#include <gssapi/gssapi_generic.h>
#endif
#ifdef DEBUG_GSSAPI
int svc_debug_gss = DEBUG_GSSAPI;
#endif
#ifdef SPKM
#ifndef OID_EQ
#define g_OID_equal(o1,o2) \
(((o1)->length == (o2)->length) && \
((o1)->elements != 0) && ((o2)->elements != 0) && \
(memcmp((o1)->elements,(o2)->elements,(int) (o1)->length) == 0))
#define OID_EQ 1
#endif /* OID_EQ */
extern const gss_OID_desc * const gss_mech_spkm3;
#endif /* SPKM */
extern SVCAUTH svc_auth_none;
static auth_gssapi_log_badauth_func log_badauth = NULL;
static caddr_t log_badauth_data = NULL;
static auth_gssapi_log_badauth2_func log_badauth2 = NULL;
static caddr_t log_badauth2_data = NULL;
static auth_gssapi_log_badverf_func log_badverf = NULL;
static caddr_t log_badverf_data = NULL;
static auth_gssapi_log_miscerr_func log_miscerr = NULL;
static caddr_t log_miscerr_data = NULL;
#define LOG_MISCERR(arg) if (log_miscerr) \
(*log_miscerr)(rqst, msg, arg, log_miscerr_data)
static bool_t svcauth_gss_destroy(SVCAUTH *);
static bool_t svcauth_gss_wrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_unwrap(SVCAUTH *, XDR *, xdrproc_t, caddr_t);
static bool_t svcauth_gss_nextverf(struct svc_req *, u_int);
struct svc_auth_ops svc_auth_gss_ops = {
svcauth_gss_wrap,
svcauth_gss_unwrap,
svcauth_gss_destroy
};
struct svc_rpc_gss_data {
bool_t established; /* context established */
gss_ctx_id_t ctx; /* context id */
struct rpc_gss_sec sec; /* security triple */
gss_buffer_desc cname; /* GSS client name */
u_int seq; /* sequence number */
u_int win; /* sequence window */
u_int seqlast; /* last sequence number */
uint32_t seqmask; /* bitmask of seqnums */
gss_name_t client_name; /* unparsed name string */
gss_buffer_desc checksum; /* so we can free it */
};
#define SVCAUTH_PRIVATE(auth) \
(*(struct svc_rpc_gss_data **)&(auth)->svc_ah_private)
/* Global server credentials. */
gss_cred_id_t svcauth_gss_creds;
static gss_name_t svcauth_gss_name = NULL;
bool_t
svcauth_gss_set_svc_name(gss_name_t name)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_set_svc_name()");
if (svcauth_gss_name != NULL) {
maj_stat = gss_release_name(&min_stat, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_name", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_name = NULL;
}
if (svcauth_gss_name == GSS_C_NO_NAME)
return (TRUE);
maj_stat = gss_duplicate_name(&min_stat, name, &svcauth_gss_name);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_duplicate_name", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_acquire_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_acquire_cred()");
maj_stat = gss_acquire_cred(&min_stat, svcauth_gss_name, 0,
GSS_C_NULL_OID_SET, GSS_C_ACCEPT,
&svcauth_gss_creds, NULL, NULL);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_acquire_cred", maj_stat, min_stat);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_release_cred(void)
{
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_release_cred()");
maj_stat = gss_release_cred(&min_stat, &svcauth_gss_creds);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_release_cred", maj_stat, min_stat);
return (FALSE);
}
svcauth_gss_creds = NULL;
return (TRUE);
}
/* Invoke log_badauth callbacks for an authentication failure. */
static void
badauth(OM_uint32 maj, OM_uint32 minor, SVCXPRT *xprt)
{
if (log_badauth != NULL)
(*log_badauth)(maj, minor, &xprt->xp_raddr, log_badauth_data);
if (log_badauth2 != NULL)
(*log_badauth2)(maj, minor, xprt, log_badauth2_data);
}
static bool_t
svcauth_gss_accept_sec_context(struct svc_req *rqst,
struct rpc_gss_init_res *gr)
{
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
gss_buffer_desc recv_tok, seqbuf;
gss_OID mech;
OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;
log_debug("in svcauth_gss_accept_context()");
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gr, 0, sizeof(*gr));
/* Deserialize arguments. */
memset(&recv_tok, 0, sizeof(recv_tok));
if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args,
(caddr_t)&recv_tok))
return (FALSE);
gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
&gd->ctx,
svcauth_gss_creds,
&recv_tok,
GSS_C_NO_CHANNEL_BINDINGS,
&gd->client_name,
&mech,
&gr->gr_token,
&ret_flags,
NULL,
NULL);
svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok);
log_status("accept_sec_context", gr->gr_major, gr->gr_minor);
if (gr->gr_major != GSS_S_COMPLETE &&
gr->gr_major != GSS_S_CONTINUE_NEEDED) {
badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt);
gd->ctx = GSS_C_NO_CONTEXT;
goto errout;
}
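/* The gr_ctx handle returned to the client is opaque to it; a fixed placeholder value is used here. */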
gr->gr_ctx.value = "xxxx";
gr->gr_ctx.length = 4;
/* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */
gr->gr_win = sizeof(gd->seqmask) * 8;
/* Save client info. */
gd->sec.mech = mech;
gd->sec.qop = GSS_C_QOP_DEFAULT;
gd->sec.svc = gc->gc_svc;
gd->seq = gc->gc_seq;
gd->win = gr->gr_win;
if (gr->gr_major == GSS_S_COMPLETE) {
#ifdef SPKM
/* spkm3: no src_name (anonymous) */
if(!g_OID_equal(gss_mech_spkm3, mech)) {
#endif
maj_stat = gss_display_name(&min_stat, gd->client_name,
&gd->cname, &gd->sec.mech);
#ifdef SPKM
}
#endif
if (maj_stat != GSS_S_COMPLETE) {
log_status("display_name", maj_stat, min_stat);
goto errout;
}
#ifdef DEBUG
#ifdef HAVE_HEIMDAL
log_debug("accepted context for %.*s with "
"<mech {}, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
gd->sec.qop, gd->sec.svc);
#else
{
gss_buffer_desc mechname;
gss_oid_to_str(&min_stat, mech, &mechname);
log_debug("accepted context for %.*s with "
"<mech %.*s, qop %d, svc %d>",
gd->cname.length, (char *)gd->cname.value,
mechname.length, (char *)mechname.value,
gd->sec.qop, gd->sec.svc);
gss_release_buffer(&min_stat, &mechname);
}
#endif
#endif /* DEBUG */
seq = htonl(gr->gr_win);
seqbuf.value = &seq;
seqbuf.length = sizeof(seq);
gss_release_buffer(&min_stat, &gd->checksum);
maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
&seqbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
goto errout;
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
}
return (TRUE);
errout:
gss_release_buffer(&min_stat, &gr->gr_token);
return (FALSE);
}
static bool_t
svcauth_gss_validate(struct svc_req *rqst, struct svc_rpc_gss_data *gd, struct rpc_msg *msg)
{
struct opaque_auth *oa;
gss_buffer_desc rpcbuf, checksum;
OM_uint32 maj_stat, min_stat, qop_state;
u_char rpchdr[128];
int32_t *buf;
log_debug("in svcauth_gss_validate()");
memset(rpchdr, 0, sizeof(rpchdr));
/* XXX - Reconstruct RPC header for signing (from xdr_callmsg). */
oa = &msg->rm_call.cb_cred;
if (oa->oa_length > MAX_AUTH_BYTES)
return (FALSE);
/* 8 XDR units from the IXDR macro calls. */
if (sizeof(rpchdr) < (8 * BYTES_PER_XDR_UNIT +
RNDUP(oa->oa_length)))
return (FALSE);
buf = (int32_t *)(void *)rpchdr;
IXDR_PUT_LONG(buf, msg->rm_xid);
IXDR_PUT_ENUM(buf, msg->rm_direction);
IXDR_PUT_LONG(buf, msg->rm_call.cb_rpcvers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_prog);
IXDR_PUT_LONG(buf, msg->rm_call.cb_vers);
IXDR_PUT_LONG(buf, msg->rm_call.cb_proc);
IXDR_PUT_ENUM(buf, oa->oa_flavor);
IXDR_PUT_LONG(buf, oa->oa_length);
if (oa->oa_length) {
memcpy((caddr_t)buf, oa->oa_base, oa->oa_length);
buf += RNDUP(oa->oa_length) / sizeof(int32_t);
}
rpcbuf.value = rpchdr;
rpcbuf.length = (u_char *)buf - rpchdr;
checksum.value = msg->rm_call.cb_verf.oa_base;
checksum.length = msg->rm_call.cb_verf.oa_length;
maj_stat = gss_verify_mic(&min_stat, gd->ctx, &rpcbuf, &checksum,
&qop_state);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_verify_mic", maj_stat, min_stat);
if (log_badverf != NULL)
(*log_badverf)(gd->client_name,
svcauth_gss_name,
rqst, msg, log_badverf_data);
return (FALSE);
}
return (TRUE);
}
static bool_t
svcauth_gss_nextverf(struct svc_req *rqst, u_int num)
{
struct svc_rpc_gss_data *gd;
gss_buffer_desc signbuf;
OM_uint32 maj_stat, min_stat;
log_debug("in svcauth_gss_nextverf()");
if (rqst->rq_xprt->xp_auth == NULL)
return (FALSE);
gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
gss_release_buffer(&min_stat, &gd->checksum);
signbuf.value = &num;
signbuf.length = sizeof(num);
maj_stat = gss_get_mic(&min_stat, gd->ctx, gd->sec.qop,
&signbuf, &gd->checksum);
if (maj_stat != GSS_S_COMPLETE) {
log_status("gss_get_mic", maj_stat, min_stat);
return (FALSE);
}
rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
rqst->rq_xprt->xp_verf.oa_base = (caddr_t)gd->checksum.value;
rqst->rq_xprt->xp_verf.oa_length = (u_int)gd->checksum.length;
return (TRUE);
}
enum auth_stat
gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg,
bool_t *no_dispatch)
{
enum auth_stat retstat;
XDR xdrs;
SVCAUTH *auth;
struct svc_rpc_gss_data *gd;
struct rpc_gss_cred *gc;
struct rpc_gss_init_res gr;
int call_stat, offset;
OM_uint32 min_stat;
log_debug("in svcauth_gss()");
/* Initialize reply. */
rqst->rq_xprt->xp_verf = gssrpc__null_auth;
/* Allocate and set up server auth handle. */
if (rqst->rq_xprt->xp_auth == NULL ||
rqst->rq_xprt->xp_auth == &svc_auth_none) {
if ((auth = calloc(sizeof(*auth), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
if ((gd = calloc(sizeof(*gd), 1)) == NULL) {
fprintf(stderr, "svcauth_gss: out_of_memory\n");
return (AUTH_FAILED);
}
auth->svc_ah_ops = &svc_auth_gss_ops;
SVCAUTH_PRIVATE(auth) = gd;
rqst->rq_xprt->xp_auth = auth;
}
else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd);
/* Deserialize client credentials. */
if (rqst->rq_cred.oa_length <= 0)
return (AUTH_BADCRED);
gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
memset(gc, 0, sizeof(*gc));
log_debug("calling xdrmem_create()");
log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length);
xdrmem_create(&xdrs, rqst->rq_cred.oa_base,
rqst->rq_cred.oa_length, XDR_DECODE);
log_debug("xdrmem_create() returned");
if (!xdr_rpc_gss_cred(&xdrs, gc)) {
log_debug("xdr_rpc_gss_cred() failed");
XDR_DESTROY(&xdrs);
return (AUTH_BADCRED);
}
XDR_DESTROY(&xdrs);
retstat = AUTH_FAILED;
#define ret_freegc(code) do { retstat = code; goto freegc; } while (0)
/* Check version. */
if (gc->gc_v != RPCSEC_GSS_VERSION)
ret_freegc (AUTH_BADCRED);
/* Check RPCSEC_GSS service. */
if (gc->gc_svc != RPCSEC_GSS_SVC_NONE &&
gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY &&
gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY)
ret_freegc (AUTH_BADCRED);
/* Check sequence number. */
if (gd->established) {
if (gc->gc_seq > MAXSEQ)
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
if ((offset = gd->seqlast - gc->gc_seq) < 0) {
gd->seqlast = gc->gc_seq;
offset = 0 - offset;
gd->seqmask <<= offset;
offset = 0;
} else if ((u_int)offset >= gd->win ||
(gd->seqmask & (1 << offset))) {
*no_dispatch = 1;
ret_freegc (RPCSEC_GSS_CTXPROBLEM);
}
gd->seq = gc->gc_seq;
gd->seqmask |= (1 << offset);
}
if (gd->established) {
rqst->rq_clntname = (char *)gd->client_name;
rqst->rq_svccred = (char *)gd->ctx;
}
/* Handle RPCSEC_GSS control procedure. */
switch (gc->gc_proc) {
case RPCSEC_GSS_INIT:
case RPCSEC_GSS_CONTINUE_INIT:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_acquire_cred())
ret_freegc (AUTH_FAILED);
if (!svcauth_gss_accept_sec_context(rqst, &gr))
ret_freegc (AUTH_REJECTEDCRED);
if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) {
gss_release_buffer(&min_stat, &gr.gr_token);
ret_freegc (AUTH_FAILED);
}
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res,
(caddr_t)&gr);
gss_release_buffer(&min_stat, &gr.gr_token);
gss_release_buffer(&min_stat, &gd->checksum);
if (!call_stat)
ret_freegc (AUTH_FAILED);
if (gr.gr_major == GSS_S_COMPLETE)
gd->established = TRUE;
break;
case RPCSEC_GSS_DATA:
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
break;
case RPCSEC_GSS_DESTROY:
if (rqst->rq_proc != NULLPROC)
ret_freegc (AUTH_FAILED); /* XXX ? */
if (!svcauth_gss_validate(rqst, gd, msg))
ret_freegc (RPCSEC_GSS_CREDPROBLEM);
if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq)))
ret_freegc (AUTH_FAILED);
*no_dispatch = TRUE;
call_stat = svc_sendreply(rqst->rq_xprt,
xdr_void, (caddr_t)NULL);
log_debug("sendreply in destroy: %d", call_stat);
if (!svcauth_gss_release_cred())
ret_freegc (AUTH_FAILED);
SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth);
rqst->rq_xprt->xp_auth = &svc_auth_none;
break;
default:
ret_freegc (AUTH_REJECTEDCRED);
break;
}
retstat = AUTH_OK;
freegc:
xdr_free(xdr_rpc_gss_cred, gc);
log_debug("returning %d from svcauth_gss()", retstat);
return (retstat);
}
static bool_t
svcauth_gss_destroy(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
OM_uint32 min_stat;
log_debug("in svcauth_gss_destroy()");
gd = SVCAUTH_PRIVATE(auth);
gss_delete_sec_context(&min_stat, &gd->ctx, GSS_C_NO_BUFFER);
gss_release_buffer(&min_stat, &gd->cname);
gss_release_buffer(&min_stat, &gd->checksum);
if (gd->client_name)
gss_release_name(&min_stat, &gd->client_name);
mem_free(gd, sizeof(*gd));
mem_free(auth, sizeof(*auth));
return (TRUE);
}
static bool_t
svcauth_gss_wrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_wrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
static bool_t
svcauth_gss_unwrap(SVCAUTH *auth, XDR *xdrs, xdrproc_t xdr_func, caddr_t xdr_ptr)
{
struct svc_rpc_gss_data *gd;
log_debug("in svcauth_gss_unwrap()");
gd = SVCAUTH_PRIVATE(auth);
if (!gd->established || gd->sec.svc == RPCSEC_GSS_SVC_NONE) {
return ((*xdr_func)(xdrs, xdr_ptr));
}
return (xdr_rpc_gss_data(xdrs, xdr_func, xdr_ptr,
gd->ctx, gd->sec.qop,
gd->sec.svc, gd->seq));
}
char *
svcauth_gss_get_principal(SVCAUTH *auth)
{
struct svc_rpc_gss_data *gd;
char *pname;
gd = SVCAUTH_PRIVATE(auth);
if (gd->cname.length == 0 || gd->cname.length >= SIZE_MAX)
return (NULL);
if ((pname = malloc(gd->cname.length + 1)) == NULL)
return (NULL);
memcpy(pname, gd->cname.value, gd->cname.length);
pname[gd->cname.length] = '\0';
return (pname);
}
/*
* Function: svcauth_gss_set_log_badauth_func
*
* Purpose: sets the logging function called when an invalid RPC call
* arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badauth_func(
auth_gssapi_log_badauth_func func,
caddr_t data)
{
log_badauth = func;
log_badauth_data = data;
}
void
svcauth_gss_set_log_badauth2_func(auth_gssapi_log_badauth2_func func,
caddr_t data)
{
log_badauth2 = func;
log_badauth2_data = data;
}
/*
* Function: svcauth_gss_set_log_badverf_func
*
* Purpose: sets the logging function called when an invalid RPC call
* arrives
*
* See functional specifications.
*/
void svcauth_gss_set_log_badverf_func(
auth_gssapi_log_badverf_func func,
caddr_t data)
{
log_badverf = func;
log_badverf_data = data;
}
/*
* Function: svcauth_gss_set_log_miscerr_func
*
* Purpose: sets the logging function called when a miscellaneous
* AUTH_GSSAPI error occurs
*
* See functional specifications.
*/
void svcauth_gss_set_log_miscerr_func(
auth_gssapi_log_miscerr_func func,
caddr_t data)
{
log_miscerr = func;
log_miscerr_data = data;
}
|
218,972,346,495,359 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/krb/bld_princ.c - Build a principal from a list of strings */
/*
* Copyright 1991 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
static krb5_error_code
build_principal_va(krb5_context context, krb5_principal princ,
unsigned int rlen, const char *realm, va_list ap)
{
krb5_error_code retval = 0;
char *r = NULL;
krb5_data *data = NULL;
krb5_int32 count = 0;
krb5_int32 size = 2; /* initial guess at needed space */
char *component = NULL;
data = malloc(size * sizeof(krb5_data));
if (!data) { retval = ENOMEM; }
if (!retval) {
r = strdup(realm);
if (!r) { retval = ENOMEM; }
}
while (!retval && (component = va_arg(ap, char *))) {
if (count == size) {
krb5_data *new_data = NULL;
size *= 2;
new_data = realloc(data, size * sizeof(krb5_data));
if (new_data) {
data = new_data;
} else {
retval = ENOMEM;
}
}
if (!retval) {
data[count].length = strlen(component);
data[count].data = strdup(component);
if (!data[count].data) { retval = ENOMEM; }
count++;
}
}
if (!retval) {
princ->type = KRB5_NT_UNKNOWN;
princ->magic = KV5M_PRINCIPAL;
princ->realm = make_data(r, rlen);
princ->data = data;
princ->length = count;
r = NULL; /* take ownership */
data = NULL; /* take ownership */
}
if (data) {
while (--count >= 0) {
free(data[count].data);
}
free(data);
}
free(r);
return retval;
}
krb5_error_code KRB5_CALLCONV
krb5_build_principal_va(krb5_context context,
krb5_principal princ,
unsigned int rlen,
const char *realm,
va_list ap)
{
return build_principal_va(context, princ, rlen, realm, ap);
}
krb5_error_code KRB5_CALLCONV
krb5_build_principal_alloc_va(krb5_context context,
krb5_principal *princ,
unsigned int rlen,
const char *realm,
va_list ap)
{
krb5_error_code retval = 0;
krb5_principal p;
p = malloc(sizeof(krb5_principal_data));
if (p == NULL)
return ENOMEM;
retval = build_principal_va(context, p, rlen, realm, ap);
if (retval) {
free(p);
return retval;
}
*princ = p;
return 0;
}
krb5_error_code KRB5_CALLCONV_C
krb5_build_principal(krb5_context context,
krb5_principal * princ,
unsigned int rlen,
const char * realm, ...)
{
krb5_error_code retval = 0;
va_list ap;
va_start(ap, realm);
retval = krb5_build_principal_alloc_va(context, princ, rlen, realm, ap);
va_end(ap);
return retval;
}
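/*
 * Illustrative usage sketch (not part of the original file): the
 * variable-argument component list must end with a NULL pointer, and
 * rlen is the realm string length.  Realm and component values below
 * are hypothetical.
 *
 *   krb5_principal princ;
 *   krb5_error_code ret;
 *
 *   ret = krb5_build_principal(context, &princ, strlen("EXAMPLE.COM"),
 *                              "EXAMPLE.COM", "host", "www.example.com",
 *                              (char *)NULL);
 *   if (!ret)
 *       krb5_free_principal(context, princ);
 */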
/*Anonymous and well known principals*/
static const char anon_realm_str[] = KRB5_ANONYMOUS_REALMSTR;
static const krb5_data anon_realm_data = {
KV5M_DATA, sizeof(anon_realm_str) - 1, (char *) anon_realm_str
};
static const char wellknown_str[] = KRB5_WELLKNOWN_NAMESTR;
static const char anon_str[] = KRB5_ANONYMOUS_PRINCSTR;
static const krb5_data anon_princ_data[] = {
{ KV5M_DATA, sizeof(wellknown_str) - 1, (char *) wellknown_str },
{ KV5M_DATA, sizeof(anon_str) - 1, (char *) anon_str }
};
const krb5_principal_data anon_princ = {
KV5M_PRINCIPAL,
{ KV5M_DATA, sizeof(anon_realm_str) - 1, (char *) anon_realm_str },
(krb5_data *) anon_princ_data, 2, KRB5_NT_WELLKNOWN
};
const krb5_data * KRB5_CALLCONV
krb5_anonymous_realm()
{
return &anon_realm_data;
}
krb5_const_principal KRB5_CALLCONV
krb5_anonymous_principal()
{
return &anon_princ;
}
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/krb/bld_princ.c - Build a principal from a list of strings */
/*
* Copyright 1991 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
static krb5_error_code
build_principal_va(krb5_context context, krb5_principal princ,
unsigned int rlen, const char *realm, va_list ap)
{
krb5_error_code retval = 0;
char *r = NULL;
krb5_data *data = NULL;
krb5_int32 count = 0;
krb5_int32 size = 2; /* initial guess at needed space */
char *component = NULL;
data = malloc(size * sizeof(krb5_data));
if (!data) { retval = ENOMEM; }
if (!retval)
r = k5memdup0(realm, rlen, &retval);
while (!retval && (component = va_arg(ap, char *))) {
if (count == size) {
krb5_data *new_data = NULL;
size *= 2;
new_data = realloc(data, size * sizeof(krb5_data));
if (new_data) {
data = new_data;
} else {
retval = ENOMEM;
}
}
if (!retval) {
data[count].length = strlen(component);
data[count].data = strdup(component);
if (!data[count].data) { retval = ENOMEM; }
count++;
}
}
if (!retval) {
princ->type = KRB5_NT_UNKNOWN;
princ->magic = KV5M_PRINCIPAL;
princ->realm = make_data(r, rlen);
princ->data = data;
princ->length = count;
r = NULL; /* take ownership */
data = NULL; /* take ownership */
}
if (data) {
while (--count >= 0) {
free(data[count].data);
}
free(data);
}
free(r);
return retval;
}
krb5_error_code KRB5_CALLCONV
krb5_build_principal_va(krb5_context context,
krb5_principal princ,
unsigned int rlen,
const char *realm,
va_list ap)
{
return build_principal_va(context, princ, rlen, realm, ap);
}
krb5_error_code KRB5_CALLCONV
krb5_build_principal_alloc_va(krb5_context context,
krb5_principal *princ,
unsigned int rlen,
const char *realm,
va_list ap)
{
krb5_error_code retval = 0;
krb5_principal p;
p = malloc(sizeof(krb5_principal_data));
if (p == NULL)
return ENOMEM;
retval = build_principal_va(context, p, rlen, realm, ap);
if (retval) {
free(p);
return retval;
}
*princ = p;
return 0;
}
krb5_error_code KRB5_CALLCONV_C
krb5_build_principal(krb5_context context,
krb5_principal * princ,
unsigned int rlen,
const char * realm, ...)
{
krb5_error_code retval = 0;
va_list ap;
va_start(ap, realm);
retval = krb5_build_principal_alloc_va(context, princ, rlen, realm, ap);
va_end(ap);
return retval;
}
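/*
 * Usage sketch (illustrative only; the realm and component names below are
 * placeholders, not taken from this file): the component list is passed
 * varargs-style and must be terminated with a null pointer, matching the
 * va_arg loop in build_principal_va() above.
 *
 *     krb5_principal princ;
 *     krb5_error_code ret;
 *
 *     ret = krb5_build_principal(context, &princ, strlen("EXAMPLE.COM"),
 *                                "EXAMPLE.COM", "user", "admin",
 *                                (char *) NULL);
 *     if (!ret)
 *         krb5_free_principal(context, princ);
 */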
/* Anonymous and well-known principals */
static const char anon_realm_str[] = KRB5_ANONYMOUS_REALMSTR;
static const krb5_data anon_realm_data = {
KV5M_DATA, sizeof(anon_realm_str) - 1, (char *) anon_realm_str
};
static const char wellknown_str[] = KRB5_WELLKNOWN_NAMESTR;
static const char anon_str[] = KRB5_ANONYMOUS_PRINCSTR;
static const krb5_data anon_princ_data[] = {
{ KV5M_DATA, sizeof(wellknown_str) - 1, (char *) wellknown_str },
{ KV5M_DATA, sizeof(anon_str) - 1, (char *) anon_str }
};
const krb5_principal_data anon_princ = {
KV5M_PRINCIPAL,
{ KV5M_DATA, sizeof(anon_realm_str) - 1, (char *) anon_realm_str },
(krb5_data *) anon_princ_data, 2, KRB5_NT_WELLKNOWN
};
const krb5_data * KRB5_CALLCONV
krb5_anonymous_realm()
{
return &anon_realm_data;
}
krb5_const_principal KRB5_CALLCONV
krb5_anonymous_principal()
{
return &anon_princ;
}
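/*
 * Usage sketch (illustrative, not a call site from this file): callers
 * typically compare an incoming client name against the shared constant:
 *
 *     if (krb5_principal_compare(context, princ,
 *                                krb5_anonymous_principal()))
 *         ... request is from WELLKNOWN/ANONYMOUS ...
 *
 * The constant uses the statically initialized realm and components defined
 * above, so no allocation or cleanup is needed.
 */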
|
272,118,200,307,017 | /* -*- mode: c; c-file-style: "bsd"; indent-tabs-mode: t -*- */
/*
* Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved
*/
#include <gssrpc/rpc.h>
#include <krb5.h>
#include <errno.h>
#include <kadm5/admin.h>
#include <kadm5/kadm_rpc.h>
#include <kadm5/admin_xdr.h>
#include <stdlib.h>
#include <string.h>
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v);
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers);
/*
* Function: xdr_ui_4
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int32,
* to prevent compiler warnings about type clashes between u_int32
* and krb5_ui_4.
*/
bool_t xdr_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
/* Assumes that krb5_ui_4 and u_int32 are both four bytes long.
This should not be a harmful assumption. */
return xdr_u_int32(xdrs, (uint32_t *) objp);
}
/*
* Function: xdr_nullstring
*
* Purpose: XDR function for "strings" that are either NULL-terminated
* or NULL.
*/
bool_t xdr_nullstring(XDR *xdrs, char **objp)
{
u_int size;
if (xdrs->x_op == XDR_ENCODE) {
if (*objp == NULL)
size = 0;
else
size = strlen(*objp) + 1;
}
if (! xdr_u_int(xdrs, &size)) {
return FALSE;
}
switch (xdrs->x_op) {
case XDR_DECODE:
if (size == 0) {
*objp = NULL;
return TRUE;
} else if (*objp == NULL) {
*objp = (char *) mem_alloc(size);
if (*objp == NULL) {
errno = ENOMEM;
return FALSE;
}
}
return (xdr_opaque(xdrs, *objp, size));
case XDR_ENCODE:
if (size != 0)
return (xdr_opaque(xdrs, *objp, size));
return TRUE;
case XDR_FREE:
if (*objp != NULL)
mem_free(*objp, size);
*objp = NULL;
return TRUE;
}
return FALSE;
}
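/*
 * Round-trip sketch (illustrative; assumes the usual xdrmem stream
 * constructor from the gssrpc/SunRPC API, and an arbitrary buffer size):
 *
 *     XDR enc, dec;
 *     char buf[64];
 *     char *in = "policy1", *out = NULL;
 *
 *     xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
 *     (void) xdr_nullstring(&enc, &in);    // writes strlen+1, then bytes
 *     xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
 *     (void) xdr_nullstring(&dec, &out);   // mem_alloc()s and fills out
 *
 * A NULL pointer travels as length 0 and decodes back to NULL.
 */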
/*
* Function: xdr_nulltype
*
* Purpose: XDR function for arbitrary pointer types that are either
* NULL or contain data.
*/
bool_t xdr_nulltype(XDR *xdrs, void **objp, xdrproc_t proc)
{
bool_t null;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null) {
*objp = NULL;
return TRUE;
}
return (*proc)(xdrs, objp);
case XDR_ENCODE:
if (*objp == NULL)
null = TRUE;
else
null = FALSE;
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null == FALSE)
return (*proc)(xdrs, objp);
return TRUE;
case XDR_FREE:
if (*objp)
return (*proc)(xdrs, objp);
return TRUE;
}
return FALSE;
}
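/*
 * Usage sketch (mirrors the call sites later in this file): a nullable
 * principal field is wrapped as
 *
 *     if (!xdr_nulltype(xdrs, (void **) &objp->mod_name,
 *                       xdr_krb5_principal))
 *         return FALSE;
 *
 * On the wire this is a boolean "is NULL" flag, followed by the encoded
 * value only when the flag is FALSE.
 */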
bool_t
xdr_krb5_timestamp(XDR *xdrs, krb5_timestamp *objp)
{
/* This assumes that int32 and krb5_timestamp are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_kvno(XDR *xdrs, krb5_kvno *objp)
{
return xdr_u_int(xdrs, objp);
}
bool_t
xdr_krb5_deltat(XDR *xdrs, krb5_deltat *objp)
{
/* This assumes that int32 and krb5_deltat are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_flags(XDR *xdrs, krb5_flags *objp)
{
/* This assumes that int32 and krb5_flags are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
if (!xdr_u_int32(xdrs, (uint32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_int16(XDR *xdrs, krb5_int16 *objp)
{
int tmp;
tmp = (int) *objp;
if (!xdr_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_int16) tmp;
return(TRUE);
}
/*
* Function: xdr_krb5_ui_2
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int,
* to prevent compiler warnings about type clashes between u_int
* and krb5_ui_2.
*/
bool_t
xdr_krb5_ui_2(XDR *xdrs, krb5_ui_2 *objp)
{
unsigned int tmp;
tmp = (unsigned int) *objp;
if (!xdr_u_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_ui_2) tmp;
return(TRUE);
}
static bool_t xdr_krb5_boolean(XDR *xdrs, krb5_boolean *kbool)
{
bool_t val;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &val))
return FALSE;
*kbool = (val == FALSE) ? FALSE : TRUE;
return TRUE;
case XDR_ENCODE:
val = *kbool ? TRUE : FALSE;
return xdr_bool(xdrs, &val);
case XDR_FREE:
return TRUE;
}
return FALSE;
}
bool_t xdr_krb5_key_data_nocontents(XDR *xdrs, krb5_key_data *objp)
{
/*
* Note that this function intentionally DOES NOT transfer key
* length or contents! xdr_krb5_key_data in adb_xdr.c does, but
* that is only for use within the server-side library.
*/
unsigned int tmp;
if (xdrs->x_op == XDR_DECODE)
memset(objp, 0, sizeof(krb5_key_data));
if (!xdr_krb5_int16(xdrs, &objp->key_data_ver)) {
return (FALSE);
}
if (!xdr_krb5_ui_2(xdrs, &objp->key_data_kvno)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[0])) {
return (FALSE);
}
if (objp->key_data_ver > 1) {
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[1])) {
return (FALSE);
}
}
/*
* kadm5_get_principal on the server side allocates and returns
* key contents when asked. Even though this function refuses to
* transmit that data, it still has to *free* the data at the
* appropriate time to avoid a memory leak.
*/
if (xdrs->x_op == XDR_FREE) {
tmp = (unsigned int) objp->key_data_length[0];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[0],
&tmp, ~0))
return FALSE;
tmp = (unsigned int) objp->key_data_length[1];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[1],
&tmp, ~0))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_krb5_key_salt_tuple(XDR *xdrs, krb5_key_salt_tuple *objp)
{
if (!xdr_krb5_enctype(xdrs, &objp->ks_enctype))
return FALSE;
if (!xdr_krb5_salttype(xdrs, &objp->ks_salttype))
return FALSE;
return TRUE;
}
bool_t xdr_krb5_tl_data(XDR *xdrs, krb5_tl_data **tl_data_head)
{
krb5_tl_data *tl, *tl2;
bool_t more;
unsigned int len;
switch (xdrs->x_op) {
case XDR_FREE:
tl = tl2 = *tl_data_head;
while (tl) {
tl2 = tl->tl_data_next;
free(tl->tl_data_contents);
free(tl);
tl = tl2;
}
*tl_data_head = NULL;
break;
case XDR_ENCODE:
tl = *tl_data_head;
while (1) {
more = (tl != NULL);
if (!xdr_bool(xdrs, &more))
return FALSE;
if (tl == NULL)
break;
if (!xdr_krb5_int16(xdrs, &tl->tl_data_type))
return FALSE;
len = tl->tl_data_length;
if (!xdr_bytes(xdrs, (char **) &tl->tl_data_contents, &len, ~0))
return FALSE;
tl = tl->tl_data_next;
}
break;
case XDR_DECODE:
tl = NULL;
while (1) {
if (!xdr_bool(xdrs, &more))
return FALSE;
if (more == FALSE)
break;
tl2 = (krb5_tl_data *) malloc(sizeof(krb5_tl_data));
if (tl2 == NULL)
return FALSE;
memset(tl2, 0, sizeof(krb5_tl_data));
if (!xdr_krb5_int16(xdrs, &tl2->tl_data_type))
return FALSE;
if (!xdr_bytes(xdrs, (char **)&tl2->tl_data_contents, &len, ~0))
return FALSE;
tl2->tl_data_length = len;
tl2->tl_data_next = tl;
tl = tl2;
}
*tl_data_head = tl;
break;
}
return TRUE;
}
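/*
 * Wire-format sketch for the list above (derived from the code, not from a
 * separate spec): each node is sent as
 *
 *     bool TRUE | int16 tl_data_type | bytes tl_data_contents
 *
 * and the list is terminated by a single bool FALSE.  Note that decode
 * prepends each node (tl2->tl_data_next = tl), so a decoded list comes back
 * in the reverse of the order in which it was encoded.
 */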
bool_t
xdr_kadm5_ret_t(XDR *xdrs, kadm5_ret_t *objp)
{
uint32_t tmp;
if (xdrs->x_op == XDR_ENCODE)
tmp = (uint32_t) *objp;
if (!xdr_u_int32(xdrs, &tmp))
return (FALSE);
if (xdrs->x_op == XDR_DECODE)
*objp = (kadm5_ret_t) tmp;
return (TRUE);
}
bool_t xdr_kadm5_principal_ent_rec(XDR *xdrs,
kadm5_principal_ent_rec *objp)
{
return _xdr_kadm5_principal_ent_rec(xdrs, objp, KADM5_API_VERSION_3);
}
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v)
{
unsigned int n;
if (!xdr_krb5_principal(xdrs, &objp->principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->princ_expire_time)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_pwd_change)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->pw_expiration)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->mod_name,
xdr_krb5_principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->mod_date)) {
return (FALSE);
}
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->kvno)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->mkvno)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->aux_attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_success)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_failed)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->fail_auth_count)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_key_data)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
n = objp->n_key_data;
if (!xdr_array(xdrs, (caddr_t *) &objp->key_data,
&n, ~0, sizeof(krb5_key_data),
xdr_krb5_key_data_nocontents)) {
return (FALSE);
}
return (TRUE);
}
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers)
{
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
/* these all used to be u_int32, but it's stupid for sized types
to be exposed at the api, and they're the same as longs on the
wire. */
if (!xdr_long(xdrs, &objp->pw_min_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_max_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_length)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_classes)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_history_num)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->policy_refcnt)) {
return (FALSE);
}
if (xdrs->x_op == XDR_DECODE) {
objp->pw_max_fail = 0;
objp->pw_failcnt_interval = 0;
objp->pw_lockout_duration = 0;
objp->attributes = 0;
objp->max_life = 0;
objp->max_renewable_life = 0;
objp->allowed_keysalts = NULL;
objp->n_tl_data = 0;
objp->tl_data = NULL;
}
if (vers >= KADM5_API_VERSION_3) {
if (!xdr_krb5_kvno(xdrs, &objp->pw_max_fail))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_failcnt_interval))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_lockout_duration))
return (FALSE);
}
if (vers >= KADM5_API_VERSION_4) {
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->allowed_keysalts)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
}
return (TRUE);
}
bool_t
xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp)
{
return _xdr_kadm5_policy_ent_rec(xdrs, objp, KADM5_API_VERSION_4);
}
bool_t
xdr_cprinc_arg(XDR *xdrs, cprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_cprinc3_arg(XDR *xdrs, cprinc3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int *)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_generic_ret(XDR *xdrs, generic_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
return(TRUE);
}
bool_t
xdr_dprinc_arg(XDR *xdrs, dprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mprinc_arg(XDR *xdrs, mprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_rprinc_arg(XDR *xdrs, rprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->src)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->dest)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_arg(XDR *xdrs, gprincs_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_ret(XDR *xdrs, gprincs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->princs,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_chpass_arg(XDR *xdrs, chpass_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chpass3_arg(XDR *xdrs, chpass3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setv4key_arg(XDR *xdrs, setv4key_arg *objp)
{
unsigned int n_keys = 1;
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblock,
&n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey_arg(XDR *xdrs, setkey_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey3_arg(XDR *xdrs, setkey3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->ks_tuple,
(unsigned int *) &objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple), xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_arg(XDR *xdrs, chrand_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand3_arg(XDR *xdrs, chrand3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_ret(XDR *xdrs, chrand_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_array(xdrs, (char **)&objp->keys,
(unsigned int *)&objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_arg(XDR *xdrs, gprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_ret(XDR *xdrs, gprinc_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_cpol_arg(XDR *xdrs, cpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_dpol_arg(XDR *xdrs, dpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mpol_arg(XDR *xdrs, mpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_arg(XDR *xdrs, gpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_ret(XDR *xdrs, gpol_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version))
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_arg(XDR *xdrs, gpols_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_ret(XDR *xdrs, gpols_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->pols,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t xdr_getprivs_ret(XDR *xdrs, getprivs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (! xdr_kadm5_ret_t(xdrs, &objp->code) ||
! xdr_long(xdrs, &objp->privs))
return FALSE;
return TRUE;
}
bool_t
xdr_purgekeys_arg(XDR *xdrs, purgekeys_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_int(xdrs, &objp->keepkvno)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gstrings_arg(XDR *xdrs, gstrings_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gstrings_ret(XDR *xdrs, gstrings_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->strings,
(unsigned int *) &objp->count, ~0,
sizeof(krb5_string_attr),
xdr_krb5_string_attr)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_sstring_arg(XDR *xdrs, sstring_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->key)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->value)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_principal(XDR *xdrs, krb5_principal *objp)
{
int ret;
char *p = NULL;
krb5_principal pr = NULL;
static krb5_context context = NULL;
/* using a static context here is ugly, but should work
ok, and the other solutions are even uglier */
if (!context &&
kadm5_init_krb5_context(&context))
return(FALSE);
switch(xdrs->x_op) {
case XDR_ENCODE:
if (*objp) {
if((ret = krb5_unparse_name(context, *objp, &p)) != 0)
return FALSE;
}
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) free(p);
break;
case XDR_DECODE:
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) {
ret = krb5_parse_name(context, p, &pr);
if(ret != 0) {
free(p);    /* don't leak the unparsed name on parse failure */
return FALSE;
}
*objp = pr;
free(p);
} else
*objp = NULL;
break;
case XDR_FREE:
if(*objp != NULL)
krb5_free_principal(context, *objp);
*objp = NULL;
break;
}
return TRUE;
}
bool_t
xdr_krb5_octet(XDR *xdrs, krb5_octet *objp)
{
if (!xdr_u_char(xdrs, objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_enctype(XDR *xdrs, krb5_enctype *objp)
{
/*
* This used to be xdr_krb5_keytype, but keytypes and enctypes have
* been merged into only enctypes. However, randkey_principal
* already ensures that only a key of ENCTYPE_DES_CBC_CRC will be
* returned to v1 clients, and ENCTYPE_DES_CBC_CRC has the same
* value as KEYTYPE_DES used to, which is what all v1 clients
* expect. Therefore, IMHO, just encoding whatever enctype we get
* is safe.
*/
if (!xdr_int32(xdrs, (int32_t *) objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_salttype(XDR *xdrs, krb5_int32 *objp)
{
if (!xdr_int32(xdrs, (int32_t *) objp))
return FALSE;
return TRUE;
}
bool_t
xdr_krb5_keyblock(XDR *xdrs, krb5_keyblock *objp)
{
/* XXX This only works because free_keyblock assumes ->contents
is allocated by malloc() */
if(!xdr_krb5_enctype(xdrs, &objp->enctype))
return FALSE;
if(!xdr_bytes(xdrs, (char **) &objp->contents, (unsigned int *)
&objp->length, ~0))
return FALSE;
return TRUE;
}
bool_t
xdr_krb5_string_attr(XDR *xdrs, krb5_string_attr *objp)
{
if (!xdr_nullstring(xdrs, &objp->key))
return FALSE;
if (!xdr_nullstring(xdrs, &objp->value))
return FALSE;
if (xdrs->x_op == XDR_DECODE &&
(objp->key == NULL || objp->value == NULL))
return FALSE;
return TRUE;
}
| /* -*- mode: c; c-file-style: "bsd"; indent-tabs-mode: t -*- */
/*
* Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved
*/
#include <gssrpc/rpc.h>
#include <krb5.h>
#include <errno.h>
#include <kadm5/admin.h>
#include <kadm5/kadm_rpc.h>
#include <kadm5/admin_xdr.h>
#include <stdlib.h>
#include <string.h>
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v);
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers);
/*
* Function: xdr_ui_4
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int32,
* to prevent compiler warnings about type clashes between u_int32
* and krb5_ui_4.
*/
bool_t xdr_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
/* Assumes that krb5_ui_4 and u_int32 are both four bytes long.
This should not be a harmful assumption. */
return xdr_u_int32(xdrs, (uint32_t *) objp);
}
/*
* Function: xdr_nullstring
*
* Purpose: XDR function for "strings" that are either NULL-terminated
* or NULL.
*/
bool_t xdr_nullstring(XDR *xdrs, char **objp)
{
u_int size;
if (xdrs->x_op == XDR_ENCODE) {
if (*objp == NULL)
size = 0;
else
size = strlen(*objp) + 1;
}
if (! xdr_u_int(xdrs, &size)) {
return FALSE;
}
switch (xdrs->x_op) {
case XDR_DECODE:
if (size == 0) {
*objp = NULL;
return TRUE;
} else if (*objp == NULL) {
*objp = (char *) mem_alloc(size);
if (*objp == NULL) {
errno = ENOMEM;
return FALSE;
}
}
if (!xdr_opaque(xdrs, *objp, size))
return FALSE;
/* Check that the unmarshalled bytes are a C string. */
if ((*objp)[size - 1] != '\0')
return FALSE;
if (memchr(*objp, '\0', size - 1) != NULL)
return FALSE;
return TRUE;
case XDR_ENCODE:
if (size != 0)
return (xdr_opaque(xdrs, *objp, size));
return TRUE;
case XDR_FREE:
if (*objp != NULL)
mem_free(*objp, size);
*objp = NULL;
return TRUE;
}
return FALSE;
}
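/*
 * Behavior sketch for the decode-side checks above (illustrative byte
 * sequences): a payload that lacks a trailing NUL, or embeds one early,
 * now fails instead of yielding a non-C-string:
 *
 *     length 3, bytes 'a' 'b' 'c'   -> FALSE (no terminating NUL)
 *     length 3, bytes 'a' '\0' 'c'  -> FALSE (embedded NUL)
 *     length 3, bytes 'a' 'b' '\0'  -> TRUE
 */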
/*
* Function: xdr_nulltype
*
* Purpose: XDR function for arbitrary pointer types that are either
* NULL or contain data.
*/
bool_t xdr_nulltype(XDR *xdrs, void **objp, xdrproc_t proc)
{
bool_t null;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null) {
*objp = NULL;
return TRUE;
}
return (*proc)(xdrs, objp);
case XDR_ENCODE:
if (*objp == NULL)
null = TRUE;
else
null = FALSE;
if (!xdr_bool(xdrs, &null))
return FALSE;
if (null == FALSE)
return (*proc)(xdrs, objp);
return TRUE;
case XDR_FREE:
if (*objp)
return (*proc)(xdrs, objp);
return TRUE;
}
return FALSE;
}
bool_t
xdr_krb5_timestamp(XDR *xdrs, krb5_timestamp *objp)
{
/* This assumes that int32 and krb5_timestamp are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_kvno(XDR *xdrs, krb5_kvno *objp)
{
return xdr_u_int(xdrs, objp);
}
bool_t
xdr_krb5_deltat(XDR *xdrs, krb5_deltat *objp)
{
/* This assumes that int32 and krb5_deltat are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_flags(XDR *xdrs, krb5_flags *objp)
{
/* This assumes that int32 and krb5_flags are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_ui_4(XDR *xdrs, krb5_ui_4 *objp)
{
if (!xdr_u_int32(xdrs, (uint32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_int16(XDR *xdrs, krb5_int16 *objp)
{
int tmp;
tmp = (int) *objp;
if (!xdr_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_int16) tmp;
return(TRUE);
}
/*
* Function: xdr_krb5_ui_2
*
* Purpose: XDR function which serves as a wrapper for xdr_u_int,
* to prevent compiler warnings about type clashes between u_int
* and krb5_ui_2.
*/
bool_t
xdr_krb5_ui_2(XDR *xdrs, krb5_ui_2 *objp)
{
unsigned int tmp;
tmp = (unsigned int) *objp;
if (!xdr_u_int(xdrs, &tmp))
return(FALSE);
*objp = (krb5_ui_2) tmp;
return(TRUE);
}
static bool_t xdr_krb5_boolean(XDR *xdrs, krb5_boolean *kbool)
{
bool_t val;
switch (xdrs->x_op) {
case XDR_DECODE:
if (!xdr_bool(xdrs, &val))
return FALSE;
*kbool = (val == FALSE) ? FALSE : TRUE;
return TRUE;
case XDR_ENCODE:
val = *kbool ? TRUE : FALSE;
return xdr_bool(xdrs, &val);
case XDR_FREE:
return TRUE;
}
return FALSE;
}
bool_t xdr_krb5_key_data_nocontents(XDR *xdrs, krb5_key_data *objp)
{
/*
* Note that this function intentionally DOES NOT transfer key
* length or contents! xdr_krb5_key_data in adb_xdr.c does, but
* that is only for use within the server-side library.
*/
unsigned int tmp;
if (xdrs->x_op == XDR_DECODE)
memset(objp, 0, sizeof(krb5_key_data));
if (!xdr_krb5_int16(xdrs, &objp->key_data_ver)) {
return (FALSE);
}
if (!xdr_krb5_ui_2(xdrs, &objp->key_data_kvno)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[0])) {
return (FALSE);
}
if (objp->key_data_ver > 1) {
if (!xdr_krb5_int16(xdrs, &objp->key_data_type[1])) {
return (FALSE);
}
}
/*
* kadm5_get_principal on the server side allocates and returns
* key contents when asked. Even though this function refuses to
* transmit that data, it still has to *free* the data at the
* appropriate time to avoid a memory leak.
*/
if (xdrs->x_op == XDR_FREE) {
tmp = (unsigned int) objp->key_data_length[0];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[0],
&tmp, ~0))
return FALSE;
tmp = (unsigned int) objp->key_data_length[1];
if (!xdr_bytes(xdrs, (char **) &objp->key_data_contents[1],
&tmp, ~0))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_krb5_key_salt_tuple(XDR *xdrs, krb5_key_salt_tuple *objp)
{
if (!xdr_krb5_enctype(xdrs, &objp->ks_enctype))
return FALSE;
if (!xdr_krb5_salttype(xdrs, &objp->ks_salttype))
return FALSE;
return TRUE;
}
bool_t xdr_krb5_tl_data(XDR *xdrs, krb5_tl_data **tl_data_head)
{
krb5_tl_data *tl, *tl2;
bool_t more;
unsigned int len;
switch (xdrs->x_op) {
case XDR_FREE:
tl = tl2 = *tl_data_head;
while (tl) {
tl2 = tl->tl_data_next;
free(tl->tl_data_contents);
free(tl);
tl = tl2;
}
*tl_data_head = NULL;
break;
case XDR_ENCODE:
tl = *tl_data_head;
while (1) {
more = (tl != NULL);
if (!xdr_bool(xdrs, &more))
return FALSE;
if (tl == NULL)
break;
if (!xdr_krb5_int16(xdrs, &tl->tl_data_type))
return FALSE;
len = tl->tl_data_length;
if (!xdr_bytes(xdrs, (char **) &tl->tl_data_contents, &len, ~0))
return FALSE;
tl = tl->tl_data_next;
}
break;
case XDR_DECODE:
tl = NULL;
while (1) {
if (!xdr_bool(xdrs, &more))
return FALSE;
if (more == FALSE)
break;
tl2 = (krb5_tl_data *) malloc(sizeof(krb5_tl_data));
if (tl2 == NULL)
return FALSE;
memset(tl2, 0, sizeof(krb5_tl_data));
if (!xdr_krb5_int16(xdrs, &tl2->tl_data_type))
return FALSE;
if (!xdr_bytes(xdrs, (char **)&tl2->tl_data_contents, &len, ~0))
return FALSE;
tl2->tl_data_length = len;
tl2->tl_data_next = tl;
tl = tl2;
}
*tl_data_head = tl;
break;
}
return TRUE;
}
bool_t
xdr_kadm5_ret_t(XDR *xdrs, kadm5_ret_t *objp)
{
uint32_t tmp;
if (xdrs->x_op == XDR_ENCODE)
tmp = (uint32_t) *objp;
if (!xdr_u_int32(xdrs, &tmp))
return (FALSE);
if (xdrs->x_op == XDR_DECODE)
*objp = (kadm5_ret_t) tmp;
return (TRUE);
}
bool_t xdr_kadm5_principal_ent_rec(XDR *xdrs,
kadm5_principal_ent_rec *objp)
{
return _xdr_kadm5_principal_ent_rec(xdrs, objp, KADM5_API_VERSION_3);
}
static bool_t
_xdr_kadm5_principal_ent_rec(XDR *xdrs, kadm5_principal_ent_rec *objp,
int v)
{
unsigned int n;
if (!xdr_krb5_principal(xdrs, &objp->principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->princ_expire_time)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_pwd_change)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->pw_expiration)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->mod_name,
xdr_krb5_principal)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->mod_date)) {
return (FALSE);
}
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->kvno)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->mkvno)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->aux_attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_success)) {
return (FALSE);
}
if (!xdr_krb5_timestamp(xdrs, &objp->last_failed)) {
return (FALSE);
}
if (!xdr_krb5_kvno(xdrs, &objp->fail_auth_count)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_key_data)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
n = objp->n_key_data;
if (!xdr_array(xdrs, (caddr_t *) &objp->key_data,
&n, ~0, sizeof(krb5_key_data),
xdr_krb5_key_data_nocontents)) {
return (FALSE);
}
return (TRUE);
}
static bool_t
_xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp, int vers)
{
if (!xdr_nullstring(xdrs, &objp->policy)) {
return (FALSE);
}
/* these all used to be u_int32, but it's stupid for sized types
to be exposed at the api, and they're the same as longs on the
wire. */
if (!xdr_long(xdrs, &objp->pw_min_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_max_life)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_length)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_min_classes)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->pw_history_num)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->policy_refcnt)) {
return (FALSE);
}
if (xdrs->x_op == XDR_DECODE) {
objp->pw_max_fail = 0;
objp->pw_failcnt_interval = 0;
objp->pw_lockout_duration = 0;
objp->attributes = 0;
objp->max_life = 0;
objp->max_renewable_life = 0;
objp->allowed_keysalts = NULL;
objp->n_tl_data = 0;
objp->tl_data = NULL;
}
if (vers >= KADM5_API_VERSION_3) {
if (!xdr_krb5_kvno(xdrs, &objp->pw_max_fail))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_failcnt_interval))
return (FALSE);
if (!xdr_krb5_deltat(xdrs, &objp->pw_lockout_duration))
return (FALSE);
}
if (vers >= KADM5_API_VERSION_4) {
if (!xdr_krb5_flags(xdrs, &objp->attributes)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_life)) {
return (FALSE);
}
if (!xdr_krb5_deltat(xdrs, &objp->max_renewable_life)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->allowed_keysalts)) {
return (FALSE);
}
if (!xdr_krb5_int16(xdrs, &objp->n_tl_data)) {
return (FALSE);
}
if (!xdr_nulltype(xdrs, (void **) &objp->tl_data,
xdr_krb5_tl_data)) {
return FALSE;
}
}
return (TRUE);
}
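/*
 * Versioning sketch (derived from the gating above): a VERSION_2 stream
 * ends after policy_refcnt; VERSION_3 appends pw_max_fail,
 * pw_failcnt_interval and pw_lockout_duration; VERSION_4 appends the
 * attribute, lifetime, keysalt and tl_data fields.  On decode, any field
 * the peer's version omits keeps the zero/NULL default assigned at the
 * top of the XDR_DECODE branch, e.g.:
 *
 *     kadm5_policy_ent_rec rec;
 *     memset(&rec, 0, sizeof(rec));
 *     if (xdr_kadm5_policy_ent_rec(xdrs, &rec))
 *         ... rec.attributes is 0 unless the peer spoke VERSION_4 ...
 */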
bool_t
xdr_kadm5_policy_ent_rec(XDR *xdrs, kadm5_policy_ent_rec *objp)
{
return _xdr_kadm5_policy_ent_rec(xdrs, objp, KADM5_API_VERSION_4);
}
bool_t
xdr_cprinc_arg(XDR *xdrs, cprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_cprinc3_arg(XDR *xdrs, cprinc3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int *)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->passwd)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_generic_ret(XDR *xdrs, generic_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
return(TRUE);
}
bool_t
xdr_dprinc_arg(XDR *xdrs, dprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mprinc_arg(XDR *xdrs, mprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_rprinc_arg(XDR *xdrs, rprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->src)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->dest)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_arg(XDR *xdrs, gprincs_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gprincs_ret(XDR *xdrs, gprincs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->princs,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_chpass_arg(XDR *xdrs, chpass_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chpass3_arg(XDR *xdrs, chpass3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setv4key_arg(XDR *xdrs, setv4key_arg *objp)
{
unsigned int n_keys = 1;
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblock,
&n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey_arg(XDR *xdrs, setkey_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_setkey3_arg(XDR *xdrs, setkey3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->ks_tuple,
(unsigned int *) &objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple), xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->keyblocks,
(unsigned int *) &objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_arg(XDR *xdrs, chrand_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand3_arg(XDR *xdrs, chrand3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_chrand_ret(XDR *xdrs, chrand_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_array(xdrs, (char **)&objp->keys,
(unsigned int *)&objp->n_keys, ~0,
sizeof(krb5_keyblock), xdr_krb5_keyblock))
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_arg(XDR *xdrs, gprinc_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gprinc_ret(XDR *xdrs, gprinc_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_principal_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_cpol_arg(XDR *xdrs, cpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_dpol_arg(XDR *xdrs, dpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_mpol_arg(XDR *xdrs, mpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version)) {
return (FALSE);
}
if (!xdr_long(xdrs, &objp->mask)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_arg(XDR *xdrs, gpol_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->name)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpol_ret(XDR *xdrs, gpol_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if(objp->code == KADM5_OK) {
if (!_xdr_kadm5_policy_ent_rec(xdrs, &objp->rec,
objp->api_version))
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_arg(XDR *xdrs, gpols_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->exp)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gpols_ret(XDR *xdrs, gpols_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->pols,
(unsigned int *) &objp->count, ~0,
sizeof(char *), xdr_nullstring)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t xdr_getprivs_ret(XDR *xdrs, getprivs_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (! xdr_kadm5_ret_t(xdrs, &objp->code) ||
! xdr_long(xdrs, &objp->privs))
return FALSE;
return TRUE;
}
bool_t
xdr_purgekeys_arg(XDR *xdrs, purgekeys_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_int(xdrs, &objp->keepkvno)) {
return FALSE;
}
return (TRUE);
}
bool_t
xdr_gstrings_arg(XDR *xdrs, gstrings_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_gstrings_ret(XDR *xdrs, gstrings_ret *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_kadm5_ret_t(xdrs, &objp->code)) {
return (FALSE);
}
if (objp->code == KADM5_OK) {
if (!xdr_int(xdrs, &objp->count)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *) &objp->strings,
(unsigned int *) &objp->count, ~0,
sizeof(krb5_string_attr),
xdr_krb5_string_attr)) {
return (FALSE);
}
}
return (TRUE);
}
bool_t
xdr_sstring_arg(XDR *xdrs, sstring_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->key)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->value)) {
return (FALSE);
}
return (TRUE);
}
bool_t
xdr_krb5_principal(XDR *xdrs, krb5_principal *objp)
{
int ret;
char *p = NULL;
krb5_principal pr = NULL;
static krb5_context context = NULL;
/* using a static context here is ugly, but should work
ok, and the other solutions are even uglier */
if (!context &&
kadm5_init_krb5_context(&context))
return(FALSE);
switch(xdrs->x_op) {
case XDR_ENCODE:
if (*objp) {
if((ret = krb5_unparse_name(context, *objp, &p)) != 0)
return FALSE;
}
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) free(p);
break;
case XDR_DECODE:
if(!xdr_nullstring(xdrs, &p))
return FALSE;
if (p) {
ret = krb5_parse_name(context, p, &pr);
if(ret != 0) {
free(p);    /* don't leak the unparsed name on parse failure */
return FALSE;
}
*objp = pr;
free(p);
} else
*objp = NULL;
break;
case XDR_FREE:
if(*objp != NULL)
krb5_free_principal(context, *objp);
*objp = NULL;
break;
}
return TRUE;
}
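/*
 * Transport sketch (derived from the switch above; the example name is a
 * placeholder): principals cross the wire in their unparsed string form,
 * so the two peers need not share a binary principal layout:
 *
 *     encode: krb5_unparse_name() -> "user/admin@EXAMPLE.COM"
 *             -> xdr_nullstring()
 *     decode: xdr_nullstring() -> krb5_parse_name() -> krb5_principal
 */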
bool_t
xdr_krb5_octet(XDR *xdrs, krb5_octet *objp)
{
if (!xdr_u_char(xdrs, objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_enctype(XDR *xdrs, krb5_enctype *objp)
{
/*
* This used to be xdr_krb5_keytype, but keytypes and enctypes have
* been merged into only enctypes. However, randkey_principal
* already ensures that only a key of ENCTYPE_DES_CBC_CRC will be
* returned to v1 clients, and ENCTYPE_DES_CBC_CRC has the same
* value as KEYTYPE_DES used to, which is what all v1 clients
* expect. Therefore, IMHO, just encoding whatever enctype we get
* is safe.
*/
if (!xdr_int32(xdrs, (int32_t *) objp))
return (FALSE);
return (TRUE);
}
bool_t
xdr_krb5_salttype(XDR *xdrs, krb5_int32 *objp)
{
if (!xdr_int32(xdrs, (int32_t *) objp))
return FALSE;
return TRUE;
}
bool_t
xdr_krb5_keyblock(XDR *xdrs, krb5_keyblock *objp)
{
/* XXX This only works because free_keyblock assumes ->contents
is allocated by malloc() */
if(!xdr_krb5_enctype(xdrs, &objp->enctype))
return FALSE;
if(!xdr_bytes(xdrs, (char **) &objp->contents, (unsigned int *)
&objp->length, ~0))
return FALSE;
return TRUE;
}
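/*
 * Cleanup sketch for the XXX above (illustrative): a keyblock filled in by
 * XDR_DECODE should be released with
 *
 *     krb5_free_keyblock_contents(context, &kb);
 *
 * which works here precisely because ->contents comes from malloc().
 */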
bool_t
xdr_krb5_string_attr(XDR *xdrs, krb5_string_attr *objp)
{
if (!xdr_nullstring(xdrs, &objp->key))
return FALSE;
if (!xdr_nullstring(xdrs, &objp->value))
return FALSE;
if (xdrs->x_op == XDR_DECODE &&
(objp->key == NULL || objp->value == NULL))
return FALSE;
return TRUE;
}
|
17,097,258,194,479 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/do_as_req.c */
/*
* Portions Copyright (C) 2007 Apple Inc.
* Copyright 1990, 1991, 2007, 2008, 2009, 2013, 2014 by the
* Massachusetts Institute of Technology. All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
*
* KDC Routines to deal with AS_REQ's
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "k5-int.h"
#include "com_err.h"
#include <syslog.h>
#ifdef HAVE_NETINET_IN_H
#include <sys/types.h>
#include <netinet/in.h>
#ifndef hpux
#include <arpa/inet.h>
#endif /* hpux */
#endif /* HAVE_NETINET_IN_H */
#include "kdc_util.h"
#include "kdc_audit.h"
#include "policy.h"
#include <kadm5/admin.h>
#include "adm_proto.h"
#include "extern.h"
static krb5_error_code
prepare_error_as(struct kdc_request_state *, krb5_kdc_req *, krb5_db_entry *,
int, krb5_pa_data **, krb5_boolean, krb5_principal,
krb5_data **, const char *);
/* Determine the key-expiration value according to RFC 4120 section 5.4.2. */
static krb5_timestamp
get_key_exp(krb5_db_entry *entry)
{
if (entry->expiration == 0)
return entry->pw_expiration;
if (entry->pw_expiration == 0)
return entry->expiration;
return ts_min(entry->expiration, entry->pw_expiration);
}
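/*
 * Worked example (hypothetical values): with expiration == 0 and
 * pw_expiration == 1700000000, the reported key-expiration is 1700000000;
 * with both nonzero, ts_min() picks the earlier of the two, per RFC 4120
 * section 5.4.2.
 */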
/*
* Find the key in client for the most preferred enctype in req_enctypes. Fill
* in *kb_out with the decrypted keyblock (which the caller must free) and set
* *kd_out to an alias to that key data entry. Set *kd_out to NULL and leave
* *kb_out zeroed if no key is found for any of the requested enctypes.
* kb_out->enctype may differ from the enctype of *kd_out for DES enctypes; in
* this case, kb_out->enctype is the requested enctype used to match the key
* data entry.
*/
static krb5_error_code
select_client_key(krb5_context context, krb5_db_entry *client,
krb5_enctype *req_enctypes, int n_req_enctypes,
krb5_keyblock *kb_out, krb5_key_data **kd_out)
{
krb5_error_code ret;
krb5_key_data *kd;
krb5_enctype etype;
int i;
memset(kb_out, 0, sizeof(*kb_out));
*kd_out = NULL;
for (i = 0; i < n_req_enctypes; i++) {
etype = req_enctypes[i];
if (!krb5_c_valid_enctype(etype))
continue;
if (krb5_dbe_find_enctype(context, client, etype, -1, 0, &kd) == 0) {
/* Decrypt the client key data and set its enctype to the request
* enctype (which may differ from the key data enctype for DES). */
ret = krb5_dbe_decrypt_key_data(context, NULL, kd, kb_out, NULL);
if (ret)
return ret;
kb_out->enctype = etype;
*kd_out = kd;
return 0;
}
}
return 0;
}
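/*
 * Caller-side sketch (illustrative, not the KDC's actual call site;
 * assumes the request's enctype list lives in req->ktype/req->nktypes as
 * in krb5_kdc_req):
 *
 *     krb5_keyblock kb;
 *     krb5_key_data *kd;
 *
 *     ret = select_client_key(context, client, req->ktype, req->nktypes,
 *                             &kb, &kd);
 *     if (!ret && kd == NULL)
 *         ... no long-term key for any requested enctype ...
 *     krb5_free_keyblock_contents(context, &kb);
 */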
struct as_req_state {
loop_respond_fn respond;
void *arg;
krb5_principal_data client_princ;
krb5_enc_tkt_part enc_tkt_reply;
krb5_enc_kdc_rep_part reply_encpart;
krb5_ticket ticket_reply;
krb5_keyblock server_keyblock;
krb5_keyblock client_keyblock;
krb5_db_entry *client;
krb5_db_entry *server;
krb5_db_entry *local_tgt;
krb5_db_entry *local_tgt_storage;
krb5_key_data *client_key;
krb5_kdc_req *request;
struct krb5_kdcpreauth_rock_st rock;
const char *status;
krb5_pa_data **e_data;
krb5_boolean typed_e_data;
krb5_kdc_rep reply;
krb5_timestamp kdc_time;
krb5_timestamp authtime;
krb5_keyblock session_key;
unsigned int c_flags;
krb5_data *req_pkt;
krb5_data *inner_body;
struct kdc_request_state *rstate;
char *sname, *cname;
void *pa_context;
const krb5_fulladdr *local_addr;
const krb5_fulladdr *remote_addr;
krb5_data **auth_indicators;
krb5_error_code preauth_err;
kdc_realm_t *active_realm;
krb5_audit_state *au_state;
};
static void
finish_process_as_req(struct as_req_state *state, krb5_error_code errcode)
{
krb5_key_data *server_key;
krb5_keyblock *as_encrypting_key = NULL;
krb5_data *response = NULL;
const char *emsg = 0;
int did_log = 0;
loop_respond_fn oldrespond;
void *oldarg;
kdc_realm_t *kdc_active_realm = state->active_realm;
krb5_audit_state *au_state = state->au_state;
assert(state);
oldrespond = state->respond;
oldarg = state->arg;
if (errcode)
goto egress;
au_state->stage = ENCR_REP;
if ((errcode = validate_forwardable(state->request, *state->client,
*state->server, state->kdc_time,
&state->status))) {
errcode += ERROR_TABLE_BASE_krb5;
goto egress;
}
errcode = check_indicators(kdc_context, state->server,
state->auth_indicators);
if (errcode) {
state->status = "HIGHER_AUTHENTICATION_REQUIRED";
goto egress;
}
state->ticket_reply.enc_part2 = &state->enc_tkt_reply;
/*
* Find the server key
*/
if ((errcode = krb5_dbe_find_enctype(kdc_context, state->server,
-1, /* ignore keytype */
-1, /* Ignore salttype */
0, /* Get highest kvno */
&server_key))) {
state->status = "FINDING_SERVER_KEY";
goto egress;
}
/*
* Convert server->key into a real key
* (it may be encrypted in the database)
*
* server_keyblock is later used to generate auth data signatures
*/
if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL,
server_key,
&state->server_keyblock,
NULL))) {
state->status = "DECRYPT_SERVER_KEY";
goto egress;
}
/* Start assembling the response */
state->reply.msg_type = KRB5_AS_REP;
state->reply.client = state->enc_tkt_reply.client; /* post canonization */
state->reply.ticket = &state->ticket_reply;
state->reply_encpart.session = &state->session_key;
if ((errcode = fetch_last_req_info(state->client,
&state->reply_encpart.last_req))) {
state->status = "FETCH_LAST_REQ";
goto egress;
}
state->reply_encpart.nonce = state->request->nonce;
state->reply_encpart.key_exp = get_key_exp(state->client);
state->reply_encpart.flags = state->enc_tkt_reply.flags;
state->reply_encpart.server = state->ticket_reply.server;
/* copy the time fields EXCEPT for authtime; its location
* is used for ktime
*/
state->reply_encpart.times = state->enc_tkt_reply.times;
state->reply_encpart.times.authtime = state->authtime = state->kdc_time;
state->reply_encpart.caddrs = state->enc_tkt_reply.caddrs;
state->reply_encpart.enc_padata = NULL;
/* Fetch the padata info to be returned (do this before
* authdata to handle possible replacement of the reply key).
*/
errcode = return_padata(kdc_context, &state->rock, state->req_pkt,
state->request, &state->reply,
&state->client_keyblock, &state->pa_context);
if (errcode) {
state->status = "KDC_RETURN_PADATA";
goto egress;
}
/* If we didn't find a client long-term key and no preauth mechanism
* replaced the reply key, error out now. */
if (state->client_keyblock.enctype == ENCTYPE_NULL) {
state->status = "CANT_FIND_CLIENT_KEY";
errcode = KRB5KDC_ERR_ETYPE_NOSUPP;
goto egress;
}
errcode = handle_authdata(kdc_context,
state->c_flags,
state->client,
state->server,
NULL,
state->local_tgt,
&state->client_keyblock,
&state->server_keyblock,
NULL,
state->req_pkt,
state->request,
NULL, /* for_user_princ */
NULL, /* enc_tkt_request */
state->auth_indicators,
&state->enc_tkt_reply);
if (errcode) {
krb5_klog_syslog(LOG_INFO, _("AS_REQ : handle_authdata (%d)"),
errcode);
state->status = "HANDLE_AUTHDATA";
goto egress;
}
errcode = krb5_encrypt_tkt_part(kdc_context, &state->server_keyblock,
&state->ticket_reply);
if (errcode) {
state->status = "ENCRYPT_TICKET";
goto egress;
}
errcode = kau_make_tkt_id(kdc_context, &state->ticket_reply,
&au_state->tkt_out_id);
if (errcode) {
state->status = "GENERATE_TICKET_ID";
goto egress;
}
state->ticket_reply.enc_part.kvno = server_key->key_data_kvno;
errcode = kdc_fast_response_handle_padata(state->rstate,
state->request,
&state->reply,
state->client_keyblock.enctype);
if (errcode) {
state->status = "MAKE_FAST_RESPONSE";
goto egress;
}
/* now encode/encrypt the response */
state->reply.enc_part.enctype = state->client_keyblock.enctype;
errcode = kdc_fast_handle_reply_key(state->rstate, &state->client_keyblock,
&as_encrypting_key);
if (errcode) {
state->status = "MAKE_FAST_REPLY_KEY";
goto egress;
}
errcode = return_enc_padata(kdc_context, state->req_pkt, state->request,
as_encrypting_key, state->server,
&state->reply_encpart, FALSE);
if (errcode) {
state->status = "KDC_RETURN_ENC_PADATA";
goto egress;
}
if (kdc_fast_hide_client(state->rstate))
state->reply.client = (krb5_principal)krb5_anonymous_principal();
errcode = krb5_encode_kdc_rep(kdc_context, KRB5_AS_REP,
&state->reply_encpart, 0,
as_encrypting_key,
&state->reply, &response);
if (state->client_key != NULL)
state->reply.enc_part.kvno = state->client_key->key_data_kvno;
if (errcode) {
state->status = "ENCODE_KDC_REP";
goto egress;
}
/* These parts are left in place as a courtesy from krb5_encode_kdc_rep so we
can use them in raw form if needed, but we don't. */
memset(state->reply.enc_part.ciphertext.data, 0,
state->reply.enc_part.ciphertext.length);
free(state->reply.enc_part.ciphertext.data);
log_as_req(kdc_context, state->local_addr, state->remote_addr,
state->request, &state->reply, state->client, state->cname,
state->server, state->sname, state->authtime, 0, 0, 0);
did_log = 1;
egress:
if (errcode != 0)
assert (state->status != 0);
au_state->status = state->status;
au_state->reply = &state->reply;
kau_as_req(kdc_context,
(errcode || state->preauth_err) ? FALSE : TRUE, au_state);
kau_free_kdc_req(au_state);
free_padata_context(kdc_context, state->pa_context);
if (as_encrypting_key)
krb5_free_keyblock(kdc_context, as_encrypting_key);
if (errcode)
emsg = krb5_get_error_message(kdc_context, errcode);
if (state->status) {
log_as_req(kdc_context, state->local_addr, state->remote_addr,
state->request, &state->reply, state->client,
state->cname, state->server, state->sname, state->authtime,
state->status, errcode, emsg);
did_log = 1;
}
if (errcode) {
if (state->status == 0) {
state->status = emsg;
}
if (errcode != KRB5KDC_ERR_DISCARD) {
errcode -= ERROR_TABLE_BASE_krb5;
if (errcode < 0 || errcode > KRB_ERR_MAX)
errcode = KRB_ERR_GENERIC;
errcode = prepare_error_as(state->rstate, state->request,
state->local_tgt, errcode,
state->e_data, state->typed_e_data,
((state->client != NULL) ?
state->client->princ : NULL),
&response, state->status);
state->status = 0;
}
}
if (emsg)
krb5_free_error_message(kdc_context, emsg);
if (state->enc_tkt_reply.authorization_data != NULL)
krb5_free_authdata(kdc_context,
state->enc_tkt_reply.authorization_data);
if (state->server_keyblock.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->server_keyblock);
if (state->client_keyblock.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->client_keyblock);
if (state->reply.padata != NULL)
krb5_free_pa_data(kdc_context, state->reply.padata);
if (state->reply_encpart.enc_padata)
krb5_free_pa_data(kdc_context, state->reply_encpart.enc_padata);
if (state->cname != NULL)
free(state->cname);
if (state->sname != NULL)
free(state->sname);
krb5_db_free_principal(kdc_context, state->client);
krb5_db_free_principal(kdc_context, state->server);
krb5_db_free_principal(kdc_context, state->local_tgt_storage);
if (state->session_key.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->session_key);
if (state->ticket_reply.enc_part.ciphertext.data != NULL) {
memset(state->ticket_reply.enc_part.ciphertext.data , 0,
state->ticket_reply.enc_part.ciphertext.length);
free(state->ticket_reply.enc_part.ciphertext.data);
}
krb5_free_pa_data(kdc_context, state->e_data);
krb5_free_data(kdc_context, state->inner_body);
kdc_free_rstate(state->rstate);
krb5_free_kdc_req(kdc_context, state->request);
k5_free_data_ptr_list(state->auth_indicators);
assert(did_log != 0);
free(state);
(*oldrespond)(oldarg, errcode, response);
}
static void
finish_missing_required_preauth(void *arg)
{
struct as_req_state *state = (struct as_req_state *)arg;
finish_process_as_req(state, state->preauth_err);
}
static void
finish_preauth(void *arg, krb5_error_code code)
{
struct as_req_state *state = arg;
krb5_error_code real_code = code;
if (code) {
if (vague_errors)
code = KRB5KRB_ERR_GENERIC;
state->status = "PREAUTH_FAILED";
if (real_code == KRB5KDC_ERR_PREAUTH_FAILED) {
state->preauth_err = code;
get_preauth_hint_list(state->request, &state->rock, &state->e_data,
finish_missing_required_preauth, state);
return;
}
} else {
/*
* Final check before handing out ticket: If the client requires
* preauthentication, verify that the proper kind of
* preauthentication was carried out.
*/
state->status = missing_required_preauth(state->client, state->server,
&state->enc_tkt_reply);
if (state->status) {
state->preauth_err = KRB5KDC_ERR_PREAUTH_REQUIRED;
get_preauth_hint_list(state->request, &state->rock, &state->e_data,
finish_missing_required_preauth, state);
return;
}
}
finish_process_as_req(state, code);
}
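/*
* Entry point for AS-REQ processing: unwrap any FAST envelope, look up the
* client, server, and local TGT entries, apply policy checks, build the
* ticket skeleton, and hand off to the preauth system.  The reply is
* delivered asynchronously through finish_process_as_req().
*/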
/*ARGSUSED*/
void
process_as_req(krb5_kdc_req *request, krb5_data *req_pkt,
const krb5_fulladdr *local_addr,
const krb5_fulladdr *remote_addr, kdc_realm_t *kdc_active_realm,
verto_ctx *vctx, loop_respond_fn respond, void *arg)
{
krb5_error_code errcode;
unsigned int s_flags = 0;
krb5_data encoded_req_body;
krb5_enctype useenctype;
struct as_req_state *state;
krb5_audit_state *au_state = NULL;
state = k5alloc(sizeof(*state), &errcode);
if (state == NULL) {
(*respond)(arg, errcode, NULL);
return;
}
state->respond = respond;
state->arg = arg;
state->request = request;
state->req_pkt = req_pkt;
state->local_addr = local_addr;
state->remote_addr = remote_addr;
state->active_realm = kdc_active_realm;
errcode = kdc_make_rstate(kdc_active_realm, &state->rstate);
if (errcode != 0) {
(*respond)(arg, errcode, NULL);
free(state);
return;
}
/* Initialize audit state. */
errcode = kau_init_kdc_req(kdc_context, state->request, remote_addr,
&au_state);
if (errcode) {
(*respond)(arg, errcode, NULL);
kdc_free_rstate(state->rstate);
free(state);
return;
}
state->au_state = au_state;
if (state->request->msg_type != KRB5_AS_REQ) {
state->status = "VALIDATE_MESSAGE_TYPE";
errcode = KRB5_BADMSGTYPE;
goto errout;
}
/* Seed the audit trail with the request ID and basic information. */
kau_as_req(kdc_context, TRUE, au_state);
if (fetch_asn1_field((unsigned char *) req_pkt->data,
1, 4, &encoded_req_body) != 0) {
errcode = ASN1_BAD_ID;
state->status = "FETCH_REQ_BODY";
goto errout;
}
errcode = kdc_find_fast(&state->request, &encoded_req_body, NULL, NULL,
state->rstate, &state->inner_body);
if (errcode) {
state->status = "FIND_FAST";
goto errout;
}
if (state->inner_body == NULL) {
/* Not a FAST request; copy the encoded request body. */
errcode = krb5_copy_data(kdc_context, &encoded_req_body,
&state->inner_body);
if (errcode) {
state->status = "COPY_REQ_BODY";
goto errout;
}
}
au_state->request = state->request;
state->rock.request = state->request;
state->rock.inner_body = state->inner_body;
state->rock.rstate = state->rstate;
state->rock.vctx = vctx;
state->rock.auth_indicators = &state->auth_indicators;
if (!state->request->client) {
state->status = "NULL_CLIENT";
errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto errout;
}
if ((errcode = krb5_unparse_name(kdc_context,
state->request->client,
&state->cname))) {
state->status = "UNPARSE_CLIENT";
goto errout;
}
limit_string(state->cname);
if (!state->request->server) {
state->status = "NULL_SERVER";
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto errout;
}
if ((errcode = krb5_unparse_name(kdc_context,
state->request->server,
&state->sname))) {
state->status = "UNPARSE_SERVER";
goto errout;
}
limit_string(state->sname);
/*
* We set KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY as a hint
* to the backend to return naming information in lieu
* of cross realm TGS entries.
*/
setflag(state->c_flags, KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY);
/*
* Note that according to the referrals draft we should
* always canonicalize enterprise principal names.
*/
if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE) ||
state->request->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
setflag(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE);
setflag(state->c_flags, KRB5_KDB_FLAG_ALIAS_OK);
}
if (include_pac_p(kdc_context, state->request)) {
setflag(state->c_flags, KRB5_KDB_FLAG_INCLUDE_PAC);
}
errcode = krb5_db_get_principal(kdc_context, state->request->client,
state->c_flags, &state->client);
if (errcode == KRB5_KDB_CANTLOCK_DB)
errcode = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (errcode == KRB5_KDB_NOENTRY) {
state->status = "CLIENT_NOT_FOUND";
if (vague_errors)
errcode = KRB5KRB_ERR_GENERIC;
else
errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto errout;
} else if (errcode) {
state->status = "LOOKING_UP_CLIENT";
goto errout;
}
state->rock.client = state->client;
/*
* If the backend returned a principal that is not in the local
* realm, then we need to refer the client to that realm.
*/
if (!is_local_principal(kdc_active_realm, state->client->princ)) {
/* Entry is a referral to another realm */
state->status = "REFERRAL";
au_state->cl_realm = &state->client->princ->realm;
errcode = KRB5KDC_ERR_WRONG_REALM;
goto errout;
}
au_state->stage = SRVC_PRINC;
s_flags = 0;
setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK);
if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE)) {
setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE);
}
errcode = krb5_db_get_principal(kdc_context, state->request->server,
s_flags, &state->server);
if (errcode == KRB5_KDB_CANTLOCK_DB)
errcode = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (errcode == KRB5_KDB_NOENTRY) {
state->status = "SERVER_NOT_FOUND";
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto errout;
} else if (errcode) {
state->status = "LOOKING_UP_SERVER";
goto errout;
}
errcode = get_local_tgt(kdc_context, &state->request->server->realm,
state->server, &state->local_tgt,
&state->local_tgt_storage);
if (errcode) {
state->status = "GET_LOCAL_TGT";
goto errout;
}
au_state->stage = VALIDATE_POL;
if ((errcode = krb5_timeofday(kdc_context, &state->kdc_time))) {
state->status = "TIMEOFDAY";
goto errout;
}
state->authtime = state->kdc_time; /* for audit_as_request() */
if ((errcode = validate_as_request(kdc_active_realm,
state->request, *state->client,
*state->server, state->kdc_time,
&state->status, &state->e_data))) {
if (!state->status)
state->status = "UNKNOWN_REASON";
errcode += ERROR_TABLE_BASE_krb5;
goto errout;
}
au_state->stage = ISSUE_TKT;
/*
* Select the keytype for the ticket session key.
*/
if ((useenctype = select_session_keytype(kdc_active_realm, state->server,
state->request->nktypes,
state->request->ktype)) == 0) {
/* unsupported ktype */
state->status = "BAD_ENCRYPTION_TYPE";
errcode = KRB5KDC_ERR_ETYPE_NOSUPP;
goto errout;
}
if ((errcode = krb5_c_make_random_key(kdc_context, useenctype,
&state->session_key))) {
state->status = "MAKE_RANDOM_KEY";
goto errout;
}
/*
* Canonicalization is only effective if we are issuing a TGT
* (the intention is to allow support for Windows "short" realm
* aliases, nothing more).
*/
if (isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE) &&
krb5_is_tgs_principal(state->request->server) &&
krb5_is_tgs_principal(state->server->princ)) {
state->ticket_reply.server = state->server->princ;
} else {
state->ticket_reply.server = state->request->server;
}
/* Copy options that request the corresponding ticket flags. */
state->enc_tkt_reply.flags = OPTS2FLAGS(state->request->kdc_options);
state->enc_tkt_reply.times.authtime = state->authtime;
setflag(state->enc_tkt_reply.flags, TKT_FLG_INITIAL);
setflag(state->enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP);
/*
* It should be noted that local policy may affect the
* processing of any of these flags. For example, some
* realms may refuse to issue renewable tickets
*/
state->enc_tkt_reply.session = &state->session_key;
if (isflagset(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE)) {
state->client_princ = *(state->client->princ);
} else {
state->client_princ = *(state->request->client);
/* The realm is always canonicalized */
state->client_princ.realm = state->client->princ->realm;
}
state->enc_tkt_reply.client = &state->client_princ;
state->enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
state->enc_tkt_reply.transited.tr_contents = empty_string;
if (isflagset(state->request->kdc_options, KDC_OPT_POSTDATED)) {
setflag(state->enc_tkt_reply.flags, TKT_FLG_INVALID);
state->enc_tkt_reply.times.starttime = state->request->from;
} else
state->enc_tkt_reply.times.starttime = state->kdc_time;
kdc_get_ticket_endtime(kdc_active_realm,
state->enc_tkt_reply.times.starttime,
kdc_infinity, state->request->till, state->client,
state->server, &state->enc_tkt_reply.times.endtime);
kdc_get_ticket_renewtime(kdc_active_realm, state->request, NULL,
state->client, state->server,
&state->enc_tkt_reply);
/*
* starttime is optional and is treated as authtime if not present,
* so we can omit it if it matches the authtime.
*/
if (state->enc_tkt_reply.times.starttime ==
state->enc_tkt_reply.times.authtime)
state->enc_tkt_reply.times.starttime = 0;
state->enc_tkt_reply.caddrs = state->request->addresses;
state->enc_tkt_reply.authorization_data = 0;
/* If anonymous requests are being used, adjust the realm of the client
* principal. */
if (isflagset(state->request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS)) {
if (!krb5_principal_compare_any_realm(kdc_context,
state->request->client,
krb5_anonymous_principal())) {
errcode = KRB5KDC_ERR_BADOPTION;
/* Anonymous requested but anonymous principal not used.*/
state->status = "VALIDATE_ANONYMOUS_PRINCIPAL";
goto errout;
}
krb5_free_principal(kdc_context, state->request->client);
state->request->client = NULL;
errcode = krb5_copy_principal(kdc_context, krb5_anonymous_principal(),
&state->request->client);
if (errcode) {
state->status = "COPY_ANONYMOUS_PRINCIPAL";
goto errout;
}
state->enc_tkt_reply.client = state->request->client;
setflag(state->client->attributes, KRB5_KDB_REQUIRES_PRE_AUTH);
}
errcode = select_client_key(kdc_context, state->client,
state->request->ktype, state->request->nktypes,
&state->client_keyblock, &state->client_key);
if (errcode) {
state->status = "DECRYPT_CLIENT_KEY";
goto errout;
}
if (state->client_key != NULL) {
state->rock.client_key = state->client_key;
state->rock.client_keyblock = &state->client_keyblock;
}
errcode = kdc_fast_read_cookie(kdc_context, state->rstate, state->request,
state->local_tgt);
if (errcode) {
state->status = "READ_COOKIE";
goto errout;
}
/*
* Check the preauthentication if it is there.
*/
if (state->request->padata) {
check_padata(kdc_context, &state->rock, state->req_pkt,
state->request, &state->enc_tkt_reply, &state->pa_context,
&state->e_data, &state->typed_e_data, finish_preauth,
state);
} else
finish_preauth(state, 0);
return;
errout:
finish_process_as_req(state, errcode);
}
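/*
* Build a KRB-ERROR response for an AS request, appending a PA-FX-COOKIE to
* any caller-supplied e-data and applying FAST error handling before encoding
* the packet into *response.
*/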
static krb5_error_code
prepare_error_as(struct kdc_request_state *rstate, krb5_kdc_req *request,
krb5_db_entry *local_tgt, int error, krb5_pa_data **e_data_in,
krb5_boolean typed_e_data, krb5_principal canon_client,
krb5_data **response, const char *status)
{
krb5_error errpkt;
krb5_error_code retval;
krb5_data *scratch = NULL, *e_data_asn1 = NULL, *fast_edata = NULL;
krb5_pa_data **e_data = NULL, *cookie = NULL;
kdc_realm_t *kdc_active_realm = rstate->realm_data;
size_t count;
errpkt.magic = KV5M_ERROR;
if (e_data_in != NULL) {
/* Add a PA-FX-COOKIE to e_data_in. e_data is a shallow copy
* containing aliases. */
for (count = 0; e_data_in[count] != NULL; count++);
e_data = calloc(count + 2, sizeof(*e_data));
if (e_data == NULL)
return ENOMEM;
memcpy(e_data, e_data_in, count * sizeof(*e_data));
retval = kdc_fast_make_cookie(kdc_context, rstate, local_tgt,
request->client, &cookie);
e_data[count] = cookie;
}
errpkt.ctime = request->nonce;
errpkt.cusec = 0;
retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec);
if (retval)
goto cleanup;
errpkt.error = error;
errpkt.server = request->server;
errpkt.client = (error == KDC_ERR_WRONG_REALM) ? canon_client :
request->client;
errpkt.text = string2data((char *)status);
if (e_data != NULL) {
if (typed_e_data)
retval = encode_krb5_typed_data(e_data, &e_data_asn1);
else
retval = encode_krb5_padata_sequence(e_data, &e_data_asn1);
if (retval)
goto cleanup;
errpkt.e_data = *e_data_asn1;
} else
errpkt.e_data = empty_data();
retval = kdc_fast_handle_error(kdc_context, rstate, request, e_data,
&errpkt, &fast_edata);
if (retval)
goto cleanup;
if (fast_edata != NULL)
errpkt.e_data = *fast_edata;
scratch = k5alloc(sizeof(*scratch), &retval);
if (scratch == NULL)
goto cleanup;
if (kdc_fast_hide_client(rstate) && errpkt.client != NULL)
errpkt.client = (krb5_principal)krb5_anonymous_principal();
retval = krb5_mk_error(kdc_context, &errpkt, scratch);
if (retval)
goto cleanup;
*response = scratch;
scratch = NULL;
cleanup:
krb5_free_data(kdc_context, fast_edata);
krb5_free_data(kdc_context, e_data_asn1);
free(scratch);
free(e_data);
if (cookie != NULL)
free(cookie->contents);
free(cookie);
return retval;
}
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/do_as_req.c */
/*
* Portions Copyright (C) 2007 Apple Inc.
* Copyright 1990, 1991, 2007, 2008, 2009, 2013, 2014 by the
* Massachusetts Institute of Technology. All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
*
* KDC Routines to deal with AS_REQ's
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "k5-int.h"
#include "com_err.h"
#include <syslog.h>
#ifdef HAVE_NETINET_IN_H
#include <sys/types.h>
#include <netinet/in.h>
#ifndef hpux
#include <arpa/inet.h>
#endif /* hpux */
#endif /* HAVE_NETINET_IN_H */
#include "kdc_util.h"
#include "kdc_audit.h"
#include "policy.h"
#include <kadm5/admin.h>
#include "adm_proto.h"
#include "extern.h"
static krb5_error_code
prepare_error_as(struct kdc_request_state *, krb5_kdc_req *, krb5_db_entry *,
int, krb5_pa_data **, krb5_boolean, krb5_principal,
krb5_data **, const char *);
/* Determine the key-expiration value according to RFC 4120 section 5.4.2. */
static krb5_timestamp
get_key_exp(krb5_db_entry *entry)
{
if (entry->expiration == 0)
return entry->pw_expiration;
if (entry->pw_expiration == 0)
return entry->expiration;
return ts_min(entry->expiration, entry->pw_expiration);
}
/*
* Find the key in client for the most preferred enctype in req_enctypes. Fill
* in *kb_out with the decrypted keyblock (which the caller must free) and set
* *kd_out to an alias to that key data entry. Set *kd_out to NULL and leave
* *kb_out zeroed if no key is found for any of the requested enctypes.
* kb_out->enctype may differ from the enctype of *kd_out for DES enctypes; in
* this case, kb_out->enctype is the requested enctype used to match the key
* data entry.
*/
static krb5_error_code
select_client_key(krb5_context context, krb5_db_entry *client,
krb5_enctype *req_enctypes, int n_req_enctypes,
krb5_keyblock *kb_out, krb5_key_data **kd_out)
{
krb5_error_code ret;
krb5_key_data *kd;
krb5_enctype etype;
int i;
memset(kb_out, 0, sizeof(*kb_out));
*kd_out = NULL;
for (i = 0; i < n_req_enctypes; i++) {
etype = req_enctypes[i];
if (!krb5_c_valid_enctype(etype))
continue;
if (krb5_dbe_find_enctype(context, client, etype, -1, 0, &kd) == 0) {
/* Decrypt the client key data and set its enctype to the request
* enctype (which may differ from the key data enctype for DES). */
ret = krb5_dbe_decrypt_key_data(context, NULL, kd, kb_out, NULL);
if (ret)
return ret;
kb_out->enctype = etype;
*kd_out = kd;
return 0;
}
}
return 0;
}
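/* Per-request state for asynchronous AS-REQ processing.  Allocated in
* process_as_req() and carried through the preauth callbacks until
* finish_process_as_req() frees it and invokes the respond callback. */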
struct as_req_state {
loop_respond_fn respond;
void *arg;
krb5_principal_data client_princ;
krb5_enc_tkt_part enc_tkt_reply;
krb5_enc_kdc_rep_part reply_encpart;
krb5_ticket ticket_reply;
krb5_keyblock server_keyblock;
krb5_keyblock client_keyblock;
krb5_db_entry *client;
krb5_db_entry *server;
krb5_db_entry *local_tgt;
krb5_db_entry *local_tgt_storage;
krb5_key_data *client_key;
krb5_kdc_req *request;
struct krb5_kdcpreauth_rock_st rock;
const char *status;
krb5_pa_data **e_data;
krb5_boolean typed_e_data;
krb5_kdc_rep reply;
krb5_timestamp kdc_time;
krb5_timestamp authtime;
krb5_keyblock session_key;
unsigned int c_flags;
krb5_data *req_pkt;
krb5_data *inner_body;
struct kdc_request_state *rstate;
char *sname, *cname;
void *pa_context;
const krb5_fulladdr *local_addr;
const krb5_fulladdr *remote_addr;
krb5_data **auth_indicators;
krb5_error_code preauth_err;
kdc_realm_t *active_realm;
krb5_audit_state *au_state;
};
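/*
* Finish processing an AS request: find and decrypt the server key, build and
* encrypt the reply (or an error response via prepare_error_as()), log and
* audit the result, release the request state, and invoke the respond
* callback.
*/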
static void
finish_process_as_req(struct as_req_state *state, krb5_error_code errcode)
{
krb5_key_data *server_key;
krb5_keyblock *as_encrypting_key = NULL;
krb5_data *response = NULL;
const char *emsg = 0;
int did_log = 0;
loop_respond_fn oldrespond;
void *oldarg;
kdc_realm_t *kdc_active_realm = state->active_realm;
krb5_audit_state *au_state = state->au_state;
assert(state);
oldrespond = state->respond;
oldarg = state->arg;
if (errcode)
goto egress;
au_state->stage = ENCR_REP;
if ((errcode = validate_forwardable(state->request, *state->client,
*state->server, state->kdc_time,
&state->status))) {
errcode += ERROR_TABLE_BASE_krb5;
goto egress;
}
errcode = check_indicators(kdc_context, state->server,
state->auth_indicators);
if (errcode) {
state->status = "HIGHER_AUTHENTICATION_REQUIRED";
goto egress;
}
state->ticket_reply.enc_part2 = &state->enc_tkt_reply;
/*
* Find the server key
*/
if ((errcode = krb5_dbe_find_enctype(kdc_context, state->server,
-1, /* ignore keytype */
-1, /* Ignore salttype */
0, /* Get highest kvno */
&server_key))) {
state->status = "FINDING_SERVER_KEY";
goto egress;
}
/*
* Convert server->key into a real key
* (it may be encrypted in the database)
*
* server_keyblock is later used to generate auth data signatures
*/
if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL,
server_key,
&state->server_keyblock,
NULL))) {
state->status = "DECRYPT_SERVER_KEY";
goto egress;
}
/* Start assembling the response */
state->reply.msg_type = KRB5_AS_REP;
state->reply.client = state->enc_tkt_reply.client; /* post canonicalization */
state->reply.ticket = &state->ticket_reply;
state->reply_encpart.session = &state->session_key;
if ((errcode = fetch_last_req_info(state->client,
&state->reply_encpart.last_req))) {
state->status = "FETCH_LAST_REQ";
goto egress;
}
state->reply_encpart.nonce = state->request->nonce;
state->reply_encpart.key_exp = get_key_exp(state->client);
state->reply_encpart.flags = state->enc_tkt_reply.flags;
state->reply_encpart.server = state->ticket_reply.server;
/* copy the time fields EXCEPT for authtime; its location
* is used for ktime
*/
state->reply_encpart.times = state->enc_tkt_reply.times;
state->reply_encpart.times.authtime = state->authtime = state->kdc_time;
state->reply_encpart.caddrs = state->enc_tkt_reply.caddrs;
state->reply_encpart.enc_padata = NULL;
/* Fetch the padata info to be returned (do this before
* authdata to handle possible replacement of the reply key).
*/
errcode = return_padata(kdc_context, &state->rock, state->req_pkt,
state->request, &state->reply,
&state->client_keyblock, &state->pa_context);
if (errcode) {
state->status = "KDC_RETURN_PADATA";
goto egress;
}
/* If we didn't find a client long-term key and no preauth mechanism
* replaced the reply key, error out now. */
if (state->client_keyblock.enctype == ENCTYPE_NULL) {
state->status = "CANT_FIND_CLIENT_KEY";
errcode = KRB5KDC_ERR_ETYPE_NOSUPP;
goto egress;
}
errcode = handle_authdata(kdc_context,
state->c_flags,
state->client,
state->server,
NULL,
state->local_tgt,
&state->client_keyblock,
&state->server_keyblock,
NULL,
state->req_pkt,
state->request,
NULL, /* for_user_princ */
NULL, /* enc_tkt_request */
state->auth_indicators,
&state->enc_tkt_reply);
if (errcode) {
krb5_klog_syslog(LOG_INFO, _("AS_REQ : handle_authdata (%d)"),
errcode);
state->status = "HANDLE_AUTHDATA";
goto egress;
}
errcode = krb5_encrypt_tkt_part(kdc_context, &state->server_keyblock,
&state->ticket_reply);
if (errcode) {
state->status = "ENCRYPT_TICKET";
goto egress;
}
errcode = kau_make_tkt_id(kdc_context, &state->ticket_reply,
&au_state->tkt_out_id);
if (errcode) {
state->status = "GENERATE_TICKET_ID";
goto egress;
}
state->ticket_reply.enc_part.kvno = server_key->key_data_kvno;
errcode = kdc_fast_response_handle_padata(state->rstate,
state->request,
&state->reply,
state->client_keyblock.enctype);
if (errcode) {
state->status = "MAKE_FAST_RESPONSE";
goto egress;
}
/* now encode/encrypt the response */
state->reply.enc_part.enctype = state->client_keyblock.enctype;
errcode = kdc_fast_handle_reply_key(state->rstate, &state->client_keyblock,
&as_encrypting_key);
if (errcode) {
state->status = "MAKE_FAST_REPLY_KEY";
goto egress;
}
errcode = return_enc_padata(kdc_context, state->req_pkt, state->request,
as_encrypting_key, state->server,
&state->reply_encpart, FALSE);
if (errcode) {
state->status = "KDC_RETURN_ENC_PADATA";
goto egress;
}
if (kdc_fast_hide_client(state->rstate))
state->reply.client = (krb5_principal)krb5_anonymous_principal();
errcode = krb5_encode_kdc_rep(kdc_context, KRB5_AS_REP,
&state->reply_encpart, 0,
as_encrypting_key,
&state->reply, &response);
if (state->client_key != NULL)
state->reply.enc_part.kvno = state->client_key->key_data_kvno;
if (errcode) {
state->status = "ENCODE_KDC_REP";
goto egress;
}
/* These parts are left in place as a courtesy from krb5_encode_kdc_rep so we
can use them in raw form if needed, but we don't. */
memset(state->reply.enc_part.ciphertext.data, 0,
state->reply.enc_part.ciphertext.length);
free(state->reply.enc_part.ciphertext.data);
log_as_req(kdc_context, state->local_addr, state->remote_addr,
state->request, &state->reply, state->client, state->cname,
state->server, state->sname, state->authtime, 0, 0, 0);
did_log = 1;
egress:
if (errcode != 0 && state->status == NULL)
state->status = "UNKNOWN_REASON";
au_state->status = state->status;
au_state->reply = &state->reply;
kau_as_req(kdc_context,
(errcode || state->preauth_err) ? FALSE : TRUE, au_state);
kau_free_kdc_req(au_state);
free_padata_context(kdc_context, state->pa_context);
if (as_encrypting_key)
krb5_free_keyblock(kdc_context, as_encrypting_key);
if (errcode)
emsg = krb5_get_error_message(kdc_context, errcode);
if (state->status) {
log_as_req(kdc_context, state->local_addr, state->remote_addr,
state->request, &state->reply, state->client,
state->cname, state->server, state->sname, state->authtime,
state->status, errcode, emsg);
did_log = 1;
}
if (errcode) {
if (state->status == 0) {
state->status = emsg;
}
if (errcode != KRB5KDC_ERR_DISCARD) {
errcode -= ERROR_TABLE_BASE_krb5;
if (errcode < 0 || errcode > KRB_ERR_MAX)
errcode = KRB_ERR_GENERIC;
errcode = prepare_error_as(state->rstate, state->request,
state->local_tgt, errcode,
state->e_data, state->typed_e_data,
((state->client != NULL) ?
state->client->princ : NULL),
&response, state->status);
state->status = 0;
}
}
if (emsg)
krb5_free_error_message(kdc_context, emsg);
if (state->enc_tkt_reply.authorization_data != NULL)
krb5_free_authdata(kdc_context,
state->enc_tkt_reply.authorization_data);
if (state->server_keyblock.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->server_keyblock);
if (state->client_keyblock.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->client_keyblock);
if (state->reply.padata != NULL)
krb5_free_pa_data(kdc_context, state->reply.padata);
if (state->reply_encpart.enc_padata)
krb5_free_pa_data(kdc_context, state->reply_encpart.enc_padata);
if (state->cname != NULL)
free(state->cname);
if (state->sname != NULL)
free(state->sname);
krb5_db_free_principal(kdc_context, state->client);
krb5_db_free_principal(kdc_context, state->server);
krb5_db_free_principal(kdc_context, state->local_tgt_storage);
if (state->session_key.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &state->session_key);
if (state->ticket_reply.enc_part.ciphertext.data != NULL) {
memset(state->ticket_reply.enc_part.ciphertext.data , 0,
state->ticket_reply.enc_part.ciphertext.length);
free(state->ticket_reply.enc_part.ciphertext.data);
}
krb5_free_pa_data(kdc_context, state->e_data);
krb5_free_data(kdc_context, state->inner_body);
kdc_free_rstate(state->rstate);
krb5_free_kdc_req(kdc_context, state->request);
k5_free_data_ptr_list(state->auth_indicators);
assert(did_log != 0);
free(state);
(*oldrespond)(oldarg, errcode, response);
}
static void
finish_missing_required_preauth(void *arg)
{
struct as_req_state *state = (struct as_req_state *)arg;
finish_process_as_req(state, state->preauth_err);
}
static void
finish_preauth(void *arg, krb5_error_code code)
{
struct as_req_state *state = arg;
krb5_error_code real_code = code;
if (code) {
if (vague_errors)
code = KRB5KRB_ERR_GENERIC;
state->status = "PREAUTH_FAILED";
if (real_code == KRB5KDC_ERR_PREAUTH_FAILED) {
state->preauth_err = code;
get_preauth_hint_list(state->request, &state->rock, &state->e_data,
finish_missing_required_preauth, state);
return;
}
} else {
/*
* Final check before handing out ticket: If the client requires
* preauthentication, verify that the proper kind of
* preauthentication was carried out.
*/
state->status = missing_required_preauth(state->client, state->server,
&state->enc_tkt_reply);
if (state->status) {
state->preauth_err = KRB5KDC_ERR_PREAUTH_REQUIRED;
get_preauth_hint_list(state->request, &state->rock, &state->e_data,
finish_missing_required_preauth, state);
return;
}
}
finish_process_as_req(state, code);
}
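/*
* Entry point for AS-REQ processing: unwrap any FAST envelope, look up the
* client, server, and local TGT entries, apply policy checks, build the
* ticket skeleton, and hand off to the preauth system.  The reply is
* delivered asynchronously through finish_process_as_req().
*/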
/*ARGSUSED*/
void
process_as_req(krb5_kdc_req *request, krb5_data *req_pkt,
const krb5_fulladdr *local_addr,
const krb5_fulladdr *remote_addr, kdc_realm_t *kdc_active_realm,
verto_ctx *vctx, loop_respond_fn respond, void *arg)
{
krb5_error_code errcode;
unsigned int s_flags = 0;
krb5_data encoded_req_body;
krb5_enctype useenctype;
struct as_req_state *state;
krb5_audit_state *au_state = NULL;
state = k5alloc(sizeof(*state), &errcode);
if (state == NULL) {
(*respond)(arg, errcode, NULL);
return;
}
state->respond = respond;
state->arg = arg;
state->request = request;
state->req_pkt = req_pkt;
state->local_addr = local_addr;
state->remote_addr = remote_addr;
state->active_realm = kdc_active_realm;
errcode = kdc_make_rstate(kdc_active_realm, &state->rstate);
if (errcode != 0) {
(*respond)(arg, errcode, NULL);
free(state);
return;
}
/* Initialize audit state. */
errcode = kau_init_kdc_req(kdc_context, state->request, remote_addr,
&au_state);
if (errcode) {
(*respond)(arg, errcode, NULL);
kdc_free_rstate(state->rstate);
free(state);
return;
}
state->au_state = au_state;
if (state->request->msg_type != KRB5_AS_REQ) {
state->status = "VALIDATE_MESSAGE_TYPE";
errcode = KRB5_BADMSGTYPE;
goto errout;
}
/* Seed the audit trail with the request ID and basic information. */
kau_as_req(kdc_context, TRUE, au_state);
if (fetch_asn1_field((unsigned char *) req_pkt->data,
1, 4, &encoded_req_body) != 0) {
errcode = ASN1_BAD_ID;
state->status = "FETCH_REQ_BODY";
goto errout;
}
errcode = kdc_find_fast(&state->request, &encoded_req_body, NULL, NULL,
state->rstate, &state->inner_body);
if (errcode) {
state->status = "FIND_FAST";
goto errout;
}
if (state->inner_body == NULL) {
/* Not a FAST request; copy the encoded request body. */
errcode = krb5_copy_data(kdc_context, &encoded_req_body,
&state->inner_body);
if (errcode) {
state->status = "COPY_REQ_BODY";
goto errout;
}
}
au_state->request = state->request;
state->rock.request = state->request;
state->rock.inner_body = state->inner_body;
state->rock.rstate = state->rstate;
state->rock.vctx = vctx;
state->rock.auth_indicators = &state->auth_indicators;
if (!state->request->client) {
state->status = "NULL_CLIENT";
errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto errout;
}
if ((errcode = krb5_unparse_name(kdc_context,
state->request->client,
&state->cname))) {
state->status = "UNPARSE_CLIENT";
goto errout;
}
limit_string(state->cname);
if (!state->request->server) {
state->status = "NULL_SERVER";
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto errout;
}
if ((errcode = krb5_unparse_name(kdc_context,
state->request->server,
&state->sname))) {
state->status = "UNPARSE_SERVER";
goto errout;
}
limit_string(state->sname);
/*
* We set KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY as a hint
* to the backend to return naming information in lieu
* of cross realm TGS entries.
*/
setflag(state->c_flags, KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY);
/*
* Note that according to the referrals draft we should
* always canonicalize enterprise principal names.
*/
if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE) ||
state->request->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
setflag(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE);
setflag(state->c_flags, KRB5_KDB_FLAG_ALIAS_OK);
}
if (include_pac_p(kdc_context, state->request)) {
setflag(state->c_flags, KRB5_KDB_FLAG_INCLUDE_PAC);
}
errcode = krb5_db_get_principal(kdc_context, state->request->client,
state->c_flags, &state->client);
if (errcode == KRB5_KDB_CANTLOCK_DB)
errcode = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (errcode == KRB5_KDB_NOENTRY) {
state->status = "CLIENT_NOT_FOUND";
if (vague_errors)
errcode = KRB5KRB_ERR_GENERIC;
else
errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN;
goto errout;
} else if (errcode) {
state->status = "LOOKING_UP_CLIENT";
goto errout;
}
state->rock.client = state->client;
/*
* If the backend returned a principal that is not in the local
* realm, then we need to refer the client to that realm.
*/
if (!is_local_principal(kdc_active_realm, state->client->princ)) {
/* Entry is a referral to another realm */
state->status = "REFERRAL";
au_state->cl_realm = &state->client->princ->realm;
errcode = KRB5KDC_ERR_WRONG_REALM;
goto errout;
}
au_state->stage = SRVC_PRINC;
s_flags = 0;
setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK);
if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE)) {
setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE);
}
errcode = krb5_db_get_principal(kdc_context, state->request->server,
s_flags, &state->server);
if (errcode == KRB5_KDB_CANTLOCK_DB)
errcode = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (errcode == KRB5_KDB_NOENTRY) {
state->status = "SERVER_NOT_FOUND";
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto errout;
} else if (errcode) {
state->status = "LOOKING_UP_SERVER";
goto errout;
}
errcode = get_local_tgt(kdc_context, &state->request->server->realm,
state->server, &state->local_tgt,
&state->local_tgt_storage);
if (errcode) {
state->status = "GET_LOCAL_TGT";
goto errout;
}
au_state->stage = VALIDATE_POL;
if ((errcode = krb5_timeofday(kdc_context, &state->kdc_time))) {
state->status = "TIMEOFDAY";
goto errout;
}
state->authtime = state->kdc_time; /* for audit_as_request() */
if ((errcode = validate_as_request(kdc_active_realm,
state->request, *state->client,
*state->server, state->kdc_time,
&state->status, &state->e_data))) {
if (!state->status)
state->status = "UNKNOWN_REASON";
errcode += ERROR_TABLE_BASE_krb5;
goto errout;
}
au_state->stage = ISSUE_TKT;
/*
* Select the keytype for the ticket session key.
*/
if ((useenctype = select_session_keytype(kdc_active_realm, state->server,
state->request->nktypes,
state->request->ktype)) == 0) {
/* unsupported ktype */
state->status = "BAD_ENCRYPTION_TYPE";
errcode = KRB5KDC_ERR_ETYPE_NOSUPP;
goto errout;
}
if ((errcode = krb5_c_make_random_key(kdc_context, useenctype,
&state->session_key))) {
state->status = "MAKE_RANDOM_KEY";
goto errout;
}
/*
* Canonicalization is only effective if we are issuing a TGT
* (the intention is to allow support for Windows "short" realm
* aliases, nothing more).
*/
if (isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE) &&
krb5_is_tgs_principal(state->request->server) &&
krb5_is_tgs_principal(state->server->princ)) {
state->ticket_reply.server = state->server->princ;
} else {
state->ticket_reply.server = state->request->server;
}
/* Copy options that request the corresponding ticket flags. */
state->enc_tkt_reply.flags = OPTS2FLAGS(state->request->kdc_options);
state->enc_tkt_reply.times.authtime = state->authtime;
setflag(state->enc_tkt_reply.flags, TKT_FLG_INITIAL);
setflag(state->enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP);
/*
* It should be noted that local policy may affect the
* processing of any of these flags. For example, some
* realms may refuse to issue renewable tickets
*/
state->enc_tkt_reply.session = &state->session_key;
if (isflagset(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE)) {
state->client_princ = *(state->client->princ);
} else {
state->client_princ = *(state->request->client);
/* The realm is always canonicalized */
state->client_princ.realm = state->client->princ->realm;
}
state->enc_tkt_reply.client = &state->client_princ;
state->enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
state->enc_tkt_reply.transited.tr_contents = empty_string;
if (isflagset(state->request->kdc_options, KDC_OPT_POSTDATED)) {
setflag(state->enc_tkt_reply.flags, TKT_FLG_INVALID);
state->enc_tkt_reply.times.starttime = state->request->from;
} else
state->enc_tkt_reply.times.starttime = state->kdc_time;
kdc_get_ticket_endtime(kdc_active_realm,
state->enc_tkt_reply.times.starttime,
kdc_infinity, state->request->till, state->client,
state->server, &state->enc_tkt_reply.times.endtime);
kdc_get_ticket_renewtime(kdc_active_realm, state->request, NULL,
state->client, state->server,
&state->enc_tkt_reply);
/*
* starttime is optional and is treated as authtime if not present,
* so we can omit it if it matches the authtime.
*/
if (state->enc_tkt_reply.times.starttime ==
state->enc_tkt_reply.times.authtime)
state->enc_tkt_reply.times.starttime = 0;
state->enc_tkt_reply.caddrs = state->request->addresses;
state->enc_tkt_reply.authorization_data = 0;
/* If anonymous requests are being used, adjust the realm of the client
* principal. */
if (isflagset(state->request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS)) {
if (!krb5_principal_compare_any_realm(kdc_context,
state->request->client,
krb5_anonymous_principal())) {
errcode = KRB5KDC_ERR_BADOPTION;
/* Anonymous requested but anonymous principal not used.*/
state->status = "VALIDATE_ANONYMOUS_PRINCIPAL";
goto errout;
}
krb5_free_principal(kdc_context, state->request->client);
state->request->client = NULL;
errcode = krb5_copy_principal(kdc_context, krb5_anonymous_principal(),
&state->request->client);
if (errcode) {
state->status = "COPY_ANONYMOUS_PRINCIPAL";
goto errout;
}
state->enc_tkt_reply.client = state->request->client;
setflag(state->client->attributes, KRB5_KDB_REQUIRES_PRE_AUTH);
}
errcode = select_client_key(kdc_context, state->client,
state->request->ktype, state->request->nktypes,
&state->client_keyblock, &state->client_key);
if (errcode) {
state->status = "DECRYPT_CLIENT_KEY";
goto errout;
}
if (state->client_key != NULL) {
state->rock.client_key = state->client_key;
state->rock.client_keyblock = &state->client_keyblock;
}
errcode = kdc_fast_read_cookie(kdc_context, state->rstate, state->request,
state->local_tgt);
if (errcode) {
state->status = "READ_COOKIE";
goto errout;
}
/*
* Check the preauthentication if it is there.
*/
if (state->request->padata) {
check_padata(kdc_context, &state->rock, state->req_pkt,
state->request, &state->enc_tkt_reply, &state->pa_context,
&state->e_data, &state->typed_e_data, finish_preauth,
state);
} else
finish_preauth(state, 0);
return;
errout:
finish_process_as_req(state, errcode);
}
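/*
* Build a KRB-ERROR response for an AS request, appending a PA-FX-COOKIE to
* any caller-supplied e-data and applying FAST error handling before encoding
* the packet into *response.
*/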
static krb5_error_code
prepare_error_as(struct kdc_request_state *rstate, krb5_kdc_req *request,
krb5_db_entry *local_tgt, int error, krb5_pa_data **e_data_in,
krb5_boolean typed_e_data, krb5_principal canon_client,
krb5_data **response, const char *status)
{
krb5_error errpkt;
krb5_error_code retval;
krb5_data *scratch = NULL, *e_data_asn1 = NULL, *fast_edata = NULL;
krb5_pa_data **e_data = NULL, *cookie = NULL;
kdc_realm_t *kdc_active_realm = rstate->realm_data;
size_t count;
errpkt.magic = KV5M_ERROR;
if (e_data_in != NULL) {
/* Add a PA-FX-COOKIE to e_data_in. e_data is a shallow copy
* containing aliases. */
for (count = 0; e_data_in[count] != NULL; count++);
e_data = calloc(count + 2, sizeof(*e_data));
if (e_data == NULL)
return ENOMEM;
memcpy(e_data, e_data_in, count * sizeof(*e_data));
retval = kdc_fast_make_cookie(kdc_context, rstate, local_tgt,
request->client, &cookie);
e_data[count] = cookie;
}
errpkt.ctime = request->nonce;
errpkt.cusec = 0;
retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec);
if (retval)
goto cleanup;
errpkt.error = error;
errpkt.server = request->server;
errpkt.client = (error == KDC_ERR_WRONG_REALM) ? canon_client :
request->client;
errpkt.text = string2data((char *)status);
if (e_data != NULL) {
if (typed_e_data)
retval = encode_krb5_typed_data(e_data, &e_data_asn1);
else
retval = encode_krb5_padata_sequence(e_data, &e_data_asn1);
if (retval)
goto cleanup;
errpkt.e_data = *e_data_asn1;
} else
errpkt.e_data = empty_data();
retval = kdc_fast_handle_error(kdc_context, rstate, request, e_data,
&errpkt, &fast_edata);
if (retval)
goto cleanup;
if (fast_edata != NULL)
errpkt.e_data = *fast_edata;
scratch = k5alloc(sizeof(*scratch), &retval);
if (scratch == NULL)
goto cleanup;
if (kdc_fast_hide_client(rstate) && errpkt.client != NULL)
errpkt.client = (krb5_principal)krb5_anonymous_principal();
retval = krb5_mk_error(kdc_context, &errpkt, scratch);
if (retval)
goto cleanup;
*response = scratch;
scratch = NULL;
cleanup:
krb5_free_data(kdc_context, fast_edata);
krb5_free_data(kdc_context, e_data_asn1);
free(scratch);
free(e_data);
if (cookie != NULL)
free(cookie->contents);
free(cookie);
return retval;
}
|
148,064,544,610,761 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */
/*
* Copyright 1990, 1991, 2001, 2007, 2008, 2009, 2013, 2014 by the
* Massachusetts Institute of Technology. All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "k5-int.h"
#include <syslog.h>
#ifdef HAVE_NETINET_IN_H
#include <sys/types.h>
#include <netinet/in.h>
#ifndef hpux
#include <arpa/inet.h>
#endif
#endif
#include "kdc_util.h"
#include "kdc_audit.h"
#include "policy.h"
#include "extern.h"
#include "adm_proto.h"
#include <ctype.h>
static krb5_error_code
find_alternate_tgs(kdc_realm_t *, krb5_principal, krb5_db_entry **,
const char**);
static krb5_error_code
prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int,
krb5_principal,krb5_data **,const char *, krb5_pa_data **);
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **,
const char **);
static krb5_error_code
gen_session_key(kdc_realm_t *, krb5_kdc_req *, krb5_db_entry *,
krb5_keyblock *, const char **);
static krb5_int32
find_referral_tgs(kdc_realm_t *, krb5_kdc_req *, krb5_principal *);
static krb5_error_code
db_get_svc_princ(krb5_context, krb5_principal, krb5_flags,
krb5_db_entry **, const char **);
static krb5_error_code
search_sprinc(kdc_realm_t *, krb5_kdc_req *, krb5_flags,
krb5_db_entry **, const char **);
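/*
* Entry point for TGS-REQ processing: verify the PA-TGS-REQ authenticator,
* unwrap any FAST envelope, look up the requested server, apply policy
* checks, and handle S4U2Self, S4U2Proxy, user-to-user, and referral cases
* while building the reply ticket.
*/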
/*ARGSUSED*/
krb5_error_code
process_tgs_req(struct server_handle *handle, krb5_data *pkt,
const krb5_fulladdr *from, krb5_data **response)
{
krb5_keyblock * subkey = 0;
krb5_keyblock *header_key = NULL;
krb5_kdc_req *request = 0;
krb5_db_entry *server = NULL;
krb5_db_entry *stkt_server = NULL;
krb5_kdc_rep reply;
krb5_enc_kdc_rep_part reply_encpart;
krb5_ticket ticket_reply, *header_ticket = 0;
int st_idx = 0;
krb5_enc_tkt_part enc_tkt_reply;
int newtransited = 0;
krb5_error_code retval = 0;
krb5_keyblock encrypting_key;
krb5_timestamp kdc_time, authtime = 0;
krb5_keyblock session_key;
krb5_keyblock *reply_key = NULL;
krb5_key_data *server_key;
krb5_principal cprinc = NULL, sprinc = NULL, altcprinc = NULL;
krb5_last_req_entry *nolrarray[2], nolrentry;
int errcode;
const char *status = 0;
krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */
krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */
krb5_db_entry *client = NULL, *header_server = NULL;
krb5_db_entry *local_tgt, *local_tgt_storage = NULL;
krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */
krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */
unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */
krb5_boolean is_referral;
const char *emsg = NULL;
krb5_kvno ticket_kvno = 0;
struct kdc_request_state *state = NULL;
krb5_pa_data *pa_tgs_req; /*points into request*/
krb5_data scratch;
krb5_pa_data **e_data = NULL;
kdc_realm_t *kdc_active_realm = NULL;
krb5_audit_state *au_state = NULL;
krb5_data **auth_indicators = NULL;
memset(&reply, 0, sizeof(reply));
memset(&reply_encpart, 0, sizeof(reply_encpart));
memset(&ticket_reply, 0, sizeof(ticket_reply));
memset(&enc_tkt_reply, 0, sizeof(enc_tkt_reply));
session_key.contents = NULL;
retval = decode_krb5_tgs_req(pkt, &request);
if (retval)
return retval;
/* Save pointer to client-requested service principal, in case of
* errors before a successful call to search_sprinc(). */
sprinc = request->server;
if (request->msg_type != KRB5_TGS_REQ) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5_BADMSGTYPE;
}
/*
* setup_server_realm() sets up the global realm-specific data pointer.
*/
kdc_active_realm = setup_server_realm(handle, request->server);
if (kdc_active_realm == NULL) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5KDC_ERR_WRONG_REALM;
}
errcode = kdc_make_rstate(kdc_active_realm, &state);
if (errcode !=0) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return errcode;
}
/* Initialize audit state. */
errcode = kau_init_kdc_req(kdc_context, request, from, &au_state);
if (errcode) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return errcode;
}
/* Seed the audit trail with the request ID and basic information. */
kau_tgs_req(kdc_context, TRUE, au_state);
errcode = kdc_process_tgs_req(kdc_active_realm,
request, from, pkt, &header_ticket,
&header_server, &header_key, &subkey,
&pa_tgs_req);
if (header_ticket && header_ticket->enc_part2)
cprinc = header_ticket->enc_part2->client;
if (errcode) {
status = "PROCESS_TGS";
goto cleanup;
}
if (!header_ticket) {
errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? */
status="UNEXPECTED NULL in header_ticket";
goto cleanup;
}
errcode = kau_make_tkt_id(kdc_context, header_ticket,
&au_state->tkt_in_id);
if (errcode) {
status = "GENERATE_TICKET_ID";
goto cleanup;
}
scratch.length = pa_tgs_req->length;
scratch.data = (char *) pa_tgs_req->contents;
errcode = kdc_find_fast(&request, &scratch, subkey,
header_ticket->enc_part2->session, state, NULL);
/* Reset sprinc because kdc_find_fast() can replace request. */
sprinc = request->server;
if (errcode !=0) {
status = "FIND_FAST";
goto cleanup;
}
errcode = get_local_tgt(kdc_context, &sprinc->realm, header_server,
&local_tgt, &local_tgt_storage);
if (errcode) {
status = "GET_LOCAL_TGT";
goto cleanup;
}
/* Ignore (for now) the request modification due to FAST processing. */
au_state->request = request;
/*
* Pointer to the encrypted part of the header ticket, which may be
* replaced to point to the encrypted part of the evidence ticket
* if constrained delegation is used. This reduces the number of
* special cases for constrained delegation.
*/
header_enc_tkt = header_ticket->enc_part2;
/*
* We've already dealt with the AP_REQ authentication, so we can
* use header_ticket freely. The encrypted part (if any) has been
* decrypted with the session key.
*/
au_state->stage = SRVC_PRINC;
/* XXX make sure server here has the proper realm...taken from AP_REQ
header? */
setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK);
if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) {
setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE);
setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE);
}
errcode = search_sprinc(kdc_active_realm, request, s_flags, &server,
&status);
if (errcode != 0)
goto cleanup;
sprinc = server->princ;
/* If we got a cross-realm TGS which is not the requested server, we are
* issuing a referral (or alternate TGT, which we treat similarly). */
is_referral = is_cross_tgs_principal(server->princ) &&
!krb5_principal_compare(kdc_context, request->server, server->princ);
au_state->stage = VALIDATE_POL;
if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) {
status = "TIME_OF_DAY";
goto cleanup;
}
if ((retval = validate_tgs_request(kdc_active_realm,
request, *server, header_ticket,
kdc_time, &status, &e_data))) {
if (!status)
status = "UNKNOWN_REASON";
if (retval == KDC_ERR_POLICY || retval == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
errcode = retval + ERROR_TABLE_BASE_krb5;
goto cleanup;
}
if (!is_local_principal(kdc_active_realm, header_enc_tkt->client))
setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM);
/* Check for protocol transition */
errcode = kdc_process_s4u2self_req(kdc_active_realm,
request,
header_enc_tkt->client,
server,
subkey,
header_enc_tkt->session,
kdc_time,
&s4u_x509_user,
&client,
&status);
if (s4u_x509_user != NULL || errcode != 0) {
if (s4u_x509_user != NULL)
au_state->s4u2self_user = s4u_x509_user->user_id.user;
if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
au_state->status = status;
kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state);
au_state->s4u2self_user = NULL;
}
if (errcode)
goto cleanup;
if (s4u_x509_user != NULL) {
setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION);
if (is_referral) {
/* The requesting server appears to no longer exist, and we found
* a referral instead. Treat this as a server lookup failure. */
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
status = "LOOKING_UP_SERVER";
goto cleanup;
}
}
/* Deal with user-to-user and constrained delegation */
errcode = decrypt_2ndtkt(kdc_active_realm, request, c_flags,
&stkt_server, &status);
if (errcode)
goto cleanup;
if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) {
/* Do constrained delegation protocol and authorization checks */
errcode = kdc_process_s4u2proxy_req(kdc_active_realm,
request,
request->second_ticket[st_idx]->enc_part2,
stkt_server,
header_ticket->enc_part2->client,
request->server,
&status);
if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
else if (errcode)
au_state->violation = LOCAL_POLICY;
au_state->status = status;
retval = kau_make_tkt_id(kdc_context, request->second_ticket[st_idx],
&au_state->evid_tkt_id);
if (retval) {
status = "GENERATE_TICKET_ID";
errcode = retval;
goto cleanup;
}
kau_s4u2proxy(kdc_context, errcode ? FALSE : TRUE, au_state);
if (errcode)
goto cleanup;
setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION);
assert(krb5_is_tgs_principal(header_ticket->server));
assert(client == NULL); /* assured by kdc_process_s4u2self_req() */
client = stkt_server;
stkt_server = NULL;
} else if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
krb5_db_free_principal(kdc_context, stkt_server);
stkt_server = NULL;
} else
assert(stkt_server == NULL);
au_state->stage = ISSUE_TKT;
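/* Pick a session key enctype acceptable to the requester (or dictated by
* the second ticket for user-to-user) and generate a random session key
* for the new ticket; see gen_session_key() below. */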
errcode = gen_session_key(kdc_active_realm, request, server, &session_key,
&status);
if (errcode)
goto cleanup;
/*
* subject_tkt will refer to the evidence ticket (for constrained
* delegation) or the TGT. The distinction from header_enc_tkt is
* necessary because the TGS signature only protects some fields:
* the others could be forged by a malicious server.
*/
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION))
subject_tkt = request->second_ticket[st_idx]->enc_part2;
else
subject_tkt = header_enc_tkt;
authtime = subject_tkt->times.authtime;
/* Extract auth indicators from the subject ticket, except for S4U2Proxy
* requests (where the client didn't authenticate). */
if (s4u_x509_user == NULL) {
errcode = get_auth_indicators(kdc_context, subject_tkt, local_tgt,
&auth_indicators);
if (errcode) {
status = "GET_AUTH_INDICATORS";
goto cleanup;
}
}
errcode = check_indicators(kdc_context, server, auth_indicators);
if (errcode) {
status = "HIGHER_AUTHENTICATION_REQUIRED";
goto cleanup;
}
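/* For a referral or alternate TGT, issue the ticket in the name of the
* cross-realm TGS entry we found; otherwise echo the requested server. */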
if (is_referral)
ticket_reply.server = server->princ;
else
ticket_reply.server = request->server; /* XXX careful for realm... */
enc_tkt_reply.flags = OPTS2FLAGS(request->kdc_options);
enc_tkt_reply.flags |= COPY_TKT_FLAGS(header_enc_tkt->flags);
enc_tkt_reply.times.starttime = 0;
if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE))
setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE);
/* Indicate support for encrypted padata (RFC 6806). */
setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP);
/* don't use new addresses unless forwarded, see below */
enc_tkt_reply.caddrs = header_enc_tkt->caddrs;
/* noaddrarray[0] = 0; */
reply_encpart.caddrs = 0;/* optional...don't put it in */
reply_encpart.enc_padata = NULL;
/*
* It should be noted that local policy may affect the
* processing of any of these flags. For example, some
* realms may refuse to issue renewable tickets
*/
if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) {
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
/*
* If S4U2Self principal is not forwardable, then mark ticket as
* unforwardable. This behaviour matches Windows, but differs from
* the MIT AS-REQ path, which returns an error
* (KDC_ERR_POLICY) if forwardable tickets cannot be issued.
*
* Consider this block the S4U2Self equivalent to
* validate_forwardable().
*/
if (client != NULL &&
isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* Forwardable flag is propagated along referral path.
*/
else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* OK_TO_AUTH_AS_DELEGATE must be set on the service requesting
* S4U2Self in order for forwardable tickets to be returned.
*/
else if (!is_referral &&
!isflagset(server->attributes,
KRB5_KDB_OK_TO_AUTH_AS_DELEGATE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
}
}
if (isflagset(request->kdc_options, KDC_OPT_FORWARDED) ||
isflagset(request->kdc_options, KDC_OPT_PROXY)) {
/* include new addresses in ticket & reply */
enc_tkt_reply.caddrs = request->addresses;
reply_encpart.caddrs = request->addresses;
}
/* We don't currently handle issuing anonymous tickets based on
* non-anonymous ones, so just ignore the option. */
if (isflagset(request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS) &&
!isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS))
clear(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS);
if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) {
setflag(enc_tkt_reply.flags, TKT_FLG_INVALID);
enc_tkt_reply.times.starttime = request->from;
} else
enc_tkt_reply.times.starttime = kdc_time;
if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) {
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
clear(enc_tkt_reply.flags, TKT_FLG_INVALID);
}
if (isflagset(request->kdc_options, KDC_OPT_RENEW)) {
krb5_timestamp old_starttime;
krb5_deltat old_life;
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
old_starttime = enc_tkt_reply.times.starttime ?
enc_tkt_reply.times.starttime : enc_tkt_reply.times.authtime;
old_life = ts_delta(enc_tkt_reply.times.endtime, old_starttime);
enc_tkt_reply.times.starttime = kdc_time;
enc_tkt_reply.times.endtime =
ts_min(header_ticket->enc_part2->times.renew_till,
ts_incr(kdc_time, old_life));
} else {
/* not a renew request */
enc_tkt_reply.times.starttime = kdc_time;
kdc_get_ticket_endtime(kdc_active_realm, enc_tkt_reply.times.starttime,
header_enc_tkt->times.endtime, request->till,
client, server, &enc_tkt_reply.times.endtime);
}
kdc_get_ticket_renewtime(kdc_active_realm, request, header_enc_tkt, client,
server, &enc_tkt_reply);
/*
* Set authtime to be the same as header or evidence ticket's
*/
enc_tkt_reply.times.authtime = authtime;
/* starttime is optional, and treated as authtime if not present,
so we can nuke it if it matches */
if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime)
enc_tkt_reply.times.starttime = 0;
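/* Record the alternate client principal used for logging: the impersonated
* user for S4U2Self, or the evidence ticket's client for constrained
* delegation. */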
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
altcprinc = s4u_x509_user->user_id.user;
} else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
altcprinc = subject_tkt->client;
} else {
altcprinc = NULL;
}
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
encrypting_key = *(t2enc->session);
} else {
/*
* Find the server key
*/
if ((errcode = krb5_dbe_find_enctype(kdc_context, server,
-1, /* ignore keytype */
-1, /* Ignore salttype */
0, /* Get highest kvno */
&server_key))) {
status = "FINDING_SERVER_KEY";
goto cleanup;
}
/*
* Convert server.key into a real key
* (it may be encrypted in the database)
*/
if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL,
server_key, &encrypting_key,
NULL))) {
status = "DECRYPT_SERVER_KEY";
goto cleanup;
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
/*
* Don't allow authorization data to be disabled if constrained
* delegation is requested. We don't want to deny the server
* the ability to validate that delegation was used.
*/
clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED);
}
if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) {
/*
* If we are not doing protocol transition/constrained delegation
* try to lookup the client principal so plugins can add additional
* authorization information.
*
* Always validate authorization data for constrained delegation
* because we must validate the KDC signatures.
*/
if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) {
/* Generate authorization data so we can include it in ticket */
setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC);
/* Map principals from foreign (possibly non-AD) realms */
setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS);
assert(client == NULL); /* should not have been set already */
errcode = krb5_db_get_principal(kdc_context, subject_tkt->client,
c_flags, &client);
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
!isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM))
enc_tkt_reply.client = s4u_x509_user->user_id.user;
else
enc_tkt_reply.client = subject_tkt->client;
enc_tkt_reply.session = &session_key;
enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */
/*
* Only add the realm of the presented tgt to the transited list if
* it is different than the local realm (cross-realm) and it is different
* than the realm of the client (since the realm of the client is already
* implicitly part of the transited list and should not be explicitly
* listed).
*/
/* realm compare is like strcmp, but knows how to deal with these args */
if (krb5_realm_compare(kdc_context, header_ticket->server, tgs_server) ||
krb5_realm_compare(kdc_context, header_ticket->server,
enc_tkt_reply.client)) {
/* tgt issued by local realm or issued by realm of client */
enc_tkt_reply.transited = header_enc_tkt->transited;
} else {
/* tgt issued by some other realm and not the realm of the client */
/* assemble new transited field into allocated storage */
if (header_enc_tkt->transited.tr_type !=
KRB5_DOMAIN_X500_COMPRESS) {
status = "VALIDATE_TRANSIT_TYPE";
errcode = KRB5KDC_ERR_TRTYPE_NOSUPP;
goto cleanup;
}
memset(&enc_tkt_reply.transited, 0, sizeof(enc_tkt_reply.transited));
enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
if ((errcode =
add_to_transited(&header_enc_tkt->transited.tr_contents,
&enc_tkt_reply.transited.tr_contents,
header_ticket->server,
enc_tkt_reply.client,
request->server))) {
status = "ADD_TO_TRANSITED_LIST";
goto cleanup;
}
newtransited = 1;
}
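/* For clients from another realm, apply local policy to the inter-realm
* path before the transited-encoding check below; failures are reported
* as NON_TRANSITIVE. */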
if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) {
errcode = validate_transit_path(kdc_context, header_enc_tkt->client,
server, header_server);
if (errcode) {
status = "NON_TRANSITIVE";
goto cleanup;
}
}
if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) {
errcode = kdc_check_transited_list (kdc_active_realm,
&enc_tkt_reply.transited.tr_contents,
krb5_princ_realm (kdc_context, header_enc_tkt->client),
krb5_princ_realm (kdc_context, request->server));
if (errcode == 0) {
setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED);
} else {
log_tgs_badtrans(kdc_context, cprinc, sprinc,
&enc_tkt_reply.transited.tr_contents, errcode);
}
} else
krb5_klog_syslog(LOG_INFO, _("not checking transit path"));
if (kdc_active_realm->realm_reject_bad_transit &&
!isflagset(enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) {
errcode = KRB5KDC_ERR_POLICY;
status = "BAD_TRANSIT";
au_state->violation = LOCAL_POLICY;
goto cleanup;
}
errcode = handle_authdata(kdc_context, c_flags, client, server,
header_server, local_tgt,
subkey != NULL ? subkey :
header_ticket->enc_part2->session,
&encrypting_key, /* U2U or server key */
header_key,
pkt,
request,
s4u_x509_user ?
s4u_x509_user->user_id.user : NULL,
subject_tkt,
auth_indicators,
&enc_tkt_reply);
if (errcode) {
krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"),
errcode);
status = "HANDLE_AUTHDATA";
goto cleanup;
}
ticket_reply.enc_part2 = &enc_tkt_reply;
/*
* If we are doing user-to-user authentication, then make sure
* that the client for the second ticket matches the request
* server, and then encrypt the ticket using the session key of
* the second ticket.
*/
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
/*
* Make sure the client for the second ticket matches
* requested server.
*/
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
krb5_principal client2 = t2enc->client;
if (!krb5_principal_compare(kdc_context, request->server, client2)) {
altcprinc = client2;
errcode = KRB5KDC_ERR_SERVER_NOMATCH;
status = "2ND_TKT_MISMATCH";
au_state->status = status;
kau_u2u(kdc_context, FALSE, au_state);
goto cleanup;
}
ticket_kvno = 0;
ticket_reply.enc_part.enctype = t2enc->session->enctype;
kau_u2u(kdc_context, TRUE, au_state);
st_idx++;
} else {
ticket_kvno = server_key->key_data_kvno;
}
errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key,
&ticket_reply);
if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY))
krb5_free_keyblock_contents(kdc_context, &encrypting_key);
if (errcode) {
status = "ENCRYPT_TICKET";
goto cleanup;
}
ticket_reply.enc_part.kvno = ticket_kvno;
/* Start assembling the response */
au_state->stage = ENCR_REP;
reply.msg_type = KRB5_TGS_REP;
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
krb5int_find_pa_data(kdc_context, request->padata,
KRB5_PADATA_S4U_X509_USER) != NULL) {
errcode = kdc_make_s4u2self_rep(kdc_context,
subkey,
header_ticket->enc_part2->session,
s4u_x509_user,
&reply,
&reply_encpart);
if (errcode) {
status = "MAKE_S4U2SELF_PADATA";
au_state->status = status;
}
kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state);
if (errcode)
goto cleanup;
}
reply.client = enc_tkt_reply.client;
reply.enc_part.kvno = 0;/* We are using the session key */
reply.ticket = &ticket_reply;
reply_encpart.session = &session_key;
reply_encpart.nonce = request->nonce;
/* copy the time fields */
reply_encpart.times = enc_tkt_reply.times;
nolrentry.lr_type = KRB5_LRQ_NONE;
nolrentry.value = 0;
nolrentry.magic = 0;
nolrarray[0] = &nolrentry;
nolrarray[1] = 0;
reply_encpart.last_req = nolrarray; /* not available for TGS reqs */
reply_encpart.key_exp = 0;/* ditto */
reply_encpart.flags = enc_tkt_reply.flags;
reply_encpart.server = ticket_reply.server;
/* use the session key in the ticket, unless there's a subsession key
in the AP_REQ */
reply.enc_part.enctype = subkey ? subkey->enctype :
header_ticket->enc_part2->session->enctype;
errcode = kdc_fast_response_handle_padata(state, request, &reply,
subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype);
if (errcode != 0) {
status = "MAKE_FAST_RESPONSE";
goto cleanup;
}
errcode = kdc_fast_handle_reply_key(state,
subkey ? subkey : header_ticket->enc_part2->session, &reply_key);
if (errcode) {
status = "MAKE_FAST_REPLY_KEY";
goto cleanup;
}
errcode = return_enc_padata(kdc_context, pkt, request,
reply_key, server, &reply_encpart,
is_referral &&
isflagset(s_flags,
KRB5_KDB_FLAG_CANONICALIZE));
if (errcode) {
status = "KDC_RETURN_ENC_PADATA";
goto cleanup;
}
errcode = kau_make_tkt_id(kdc_context, &ticket_reply, &au_state->tkt_out_id);
if (errcode) {
status = "GENERATE_TICKET_ID";
goto cleanup;
}
if (kdc_fast_hide_client(state))
reply.client = (krb5_principal)krb5_anonymous_principal();
errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart,
subkey ? 1 : 0,
reply_key,
&reply, response);
if (errcode) {
status = "ENCODE_KDC_REP";
} else {
status = "ISSUE";
}
memset(ticket_reply.enc_part.ciphertext.data, 0,
ticket_reply.enc_part.ciphertext.length);
free(ticket_reply.enc_part.ciphertext.data);
/* these parts are left on as a courtesy from krb5_encode_kdc_rep so we
can use them in raw form if needed. But, we don't... */
memset(reply.enc_part.ciphertext.data, 0,
reply.enc_part.ciphertext.length);
free(reply.enc_part.ciphertext.data);
cleanup:
assert(status != NULL);
if (reply_key)
krb5_free_keyblock(kdc_context, reply_key);
if (errcode)
emsg = krb5_get_error_message (kdc_context, errcode);
au_state->status = status;
if (!errcode)
au_state->reply = &reply;
kau_tgs_req(kdc_context, errcode ? FALSE : TRUE, au_state);
kau_free_kdc_req(au_state);
log_tgs_req(kdc_context, from, request, &reply, cprinc,
sprinc, altcprinc, authtime,
c_flags, status, errcode, emsg);
if (errcode) {
krb5_free_error_message (kdc_context, emsg);
emsg = NULL;
}
if (errcode) {
int got_err = 0;
if (status == 0) {
status = krb5_get_error_message (kdc_context, errcode);
got_err = 1;
}
errcode -= ERROR_TABLE_BASE_krb5;
if (errcode < 0 || errcode > KRB_ERR_MAX)
errcode = KRB_ERR_GENERIC;
retval = prepare_error_tgs(state, request, header_ticket, errcode,
(server != NULL) ? server->princ : NULL,
response, status, e_data);
if (got_err) {
krb5_free_error_message (kdc_context, status);
status = 0;
}
}
if (header_ticket != NULL)
krb5_free_ticket(kdc_context, header_ticket);
if (request != NULL)
krb5_free_kdc_req(kdc_context, request);
if (state)
kdc_free_rstate(state);
krb5_db_free_principal(kdc_context, server);
krb5_db_free_principal(kdc_context, stkt_server);
krb5_db_free_principal(kdc_context, header_server);
krb5_db_free_principal(kdc_context, client);
krb5_db_free_principal(kdc_context, local_tgt_storage);
if (session_key.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &session_key);
if (newtransited)
free(enc_tkt_reply.transited.tr_contents.data);
if (s4u_x509_user != NULL)
krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user);
if (kdc_issued_auth_data != NULL)
krb5_free_authdata(kdc_context, kdc_issued_auth_data);
if (subkey != NULL)
krb5_free_keyblock(kdc_context, subkey);
if (header_key != NULL)
krb5_free_keyblock(kdc_context, header_key);
if (reply.padata)
krb5_free_pa_data(kdc_context, reply.padata);
if (reply_encpart.enc_padata)
krb5_free_pa_data(kdc_context, reply_encpart.enc_padata);
if (enc_tkt_reply.authorization_data != NULL)
krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data);
krb5_free_pa_data(kdc_context, e_data);
k5_free_data_ptr_list(auth_indicators);
return retval;
}
static krb5_error_code
prepare_error_tgs (struct kdc_request_state *state,
krb5_kdc_req *request, krb5_ticket *ticket, int error,
krb5_principal canon_server,
krb5_data **response, const char *status,
krb5_pa_data **e_data)
{
krb5_error errpkt;
krb5_error_code retval = 0;
krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL;
kdc_realm_t *kdc_active_realm = state->realm_data;
errpkt.magic = KV5M_ERROR;
errpkt.ctime = request->nonce;
errpkt.cusec = 0;
if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime,
&errpkt.susec)))
return(retval);
errpkt.error = error;
errpkt.server = request->server;
if (ticket && ticket->enc_part2)
errpkt.client = ticket->enc_part2->client;
else
errpkt.client = NULL;
errpkt.text.length = strlen(status);
if (!(errpkt.text.data = strdup(status)))
return ENOMEM;
if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) {
free(errpkt.text.data);
return ENOMEM;
}
if (e_data != NULL) {
retval = encode_krb5_padata_sequence(e_data, &e_data_asn1);
if (retval) {
free(scratch);
free(errpkt.text.data);
return retval;
}
errpkt.e_data = *e_data_asn1;
} else
errpkt.e_data = empty_data();
retval = kdc_fast_handle_error(kdc_context, state, request, e_data,
&errpkt, &fast_edata);
if (retval) {
free(scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
return retval;
}
if (fast_edata)
errpkt.e_data = *fast_edata;
if (kdc_fast_hide_client(state) && errpkt.client != NULL)
errpkt.client = (krb5_principal)krb5_anonymous_principal();
retval = krb5_mk_error(kdc_context, &errpkt, scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
krb5_free_data(kdc_context, fast_edata);
if (retval)
free(scratch);
else
*response = scratch;
return retval;
}
/* KDC options that require a second ticket */
#define STKT_OPTIONS (KDC_OPT_CNAME_IN_ADDL_TKT | KDC_OPT_ENC_TKT_IN_SKEY)
/*
* Get the key for the second ticket, if any, and decrypt it.
*/
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server_out,
const char **status)
{
krb5_error_code retval;
krb5_db_entry *server = NULL;
krb5_keyblock *key;
krb5_kvno kvno;
krb5_ticket *stkt;
if (!(req->kdc_options & STKT_OPTIONS))
return 0;
stkt = req->second_ticket[0];
retval = kdc_get_server_key(kdc_context, stkt,
flags,
TRUE, /* match_enctype */
&server,
&key,
&kvno);
if (retval != 0) {
*status = "2ND_TKT_SERVER";
goto cleanup;
}
retval = krb5_decrypt_tkt_part(kdc_context, key,
req->second_ticket[0]);
krb5_free_keyblock(kdc_context, key);
if (retval != 0) {
*status = "2ND_TKT_DECRYPT";
goto cleanup;
}
*server_out = server;
server = NULL;
cleanup:
krb5_db_free_principal(kdc_context, server);
return retval;
}
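/* For user-to-user requests, prefer the second ticket's session enctype for
* the new session key (the u2u server is known to support it), but only if
* the requester also listed that enctype. */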
static krb5_error_code
get_2ndtkt_enctype(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_enctype *useenctype, const char **status)
{
krb5_enctype etype;
krb5_ticket *stkt = req->second_ticket[0];
int i;
etype = stkt->enc_part2->session->enctype;
if (!krb5_c_valid_enctype(etype)) {
*status = "BAD_ETYPE_IN_2ND_TKT";
return KRB5KDC_ERR_ETYPE_NOSUPP;
}
for (i = 0; i < req->nktypes; i++) {
if (req->ktype[i] == etype) {
*useenctype = etype;
break;
}
}
return 0;
}
static krb5_error_code
gen_session_key(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_db_entry *server, krb5_keyblock *skey,
const char **status)
{
krb5_error_code retval;
krb5_enctype useenctype = 0;
/*
* Some special care needs to be taken in the user-to-user
* case, since we don't know what keytypes the application server
* which is doing user-to-user authentication can support. We
* know that it at least must be able to support the encryption
* type of the session key in the TGT, since otherwise it won't be
* able to decrypt the U2U ticket! So we use that in preference
* to anything else.
*/
if (req->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
retval = get_2ndtkt_enctype(kdc_active_realm, req, &useenctype,
status);
if (retval != 0)
goto cleanup;
}
if (useenctype == 0) {
useenctype = select_session_keytype(kdc_active_realm, server,
req->nktypes,
req->ktype);
}
if (useenctype == 0) {
/* unsupported ktype */
*status = "BAD_ENCRYPTION_TYPE";
retval = KRB5KDC_ERR_ETYPE_NOSUPP;
goto cleanup;
}
retval = krb5_c_make_random_key(kdc_context, useenctype, skey);
if (retval != 0) {
/* random key failed */
*status = "MAKE_RANDOM_KEY";
goto cleanup;
}
cleanup:
return retval;
}
/*
* The request seems to be for a ticket-granting service somewhere else,
* but we don't have a ticket for the final TGS. Try to give the requestor
* some intermediate realm.
*/
static krb5_error_code
find_alternate_tgs(kdc_realm_t *kdc_active_realm, krb5_principal princ,
krb5_db_entry **server_ptr, const char **status)
{
krb5_error_code retval;
krb5_principal *plist = NULL, *pl2;
krb5_data tmp;
krb5_db_entry *server = NULL;
*server_ptr = NULL;
assert(is_cross_tgs_principal(princ));
if ((retval = krb5_walk_realm_tree(kdc_context,
krb5_princ_realm(kdc_context, princ),
krb5_princ_component(kdc_context, princ, 1),
&plist, KRB5_REALM_BRANCH_CHAR))) {
goto cleanup;
}
/* move to the end */
for (pl2 = plist; *pl2; pl2++);
/* the first entry in this array is for krbtgt/local@local, so we
ignore it */
while (--pl2 > plist) {
tmp = *krb5_princ_realm(kdc_context, *pl2);
krb5_princ_set_realm(kdc_context, *pl2,
krb5_princ_realm(kdc_context, princ));
retval = db_get_svc_princ(kdc_context, *pl2, 0, &server, status);
krb5_princ_set_realm(kdc_context, *pl2, &tmp);
if (retval == KRB5_KDB_NOENTRY)
continue;
else if (retval)
goto cleanup;
log_tgs_alt_tgt(kdc_context, server->princ);
*server_ptr = server;
server = NULL;
goto cleanup;
}
cleanup:
if (retval == 0 && *server_ptr == NULL)
retval = KRB5_KDB_NOENTRY;
if (retval != 0)
*status = "UNKNOWN_SERVER";
krb5_free_realm_tree(kdc_context, plist);
krb5_db_free_principal(kdc_context, server);
return retval;
}
/* Return true if item is an element of the space/comma-separated list. */
static krb5_boolean
in_list(const char *list, const char *item)
{
const char *p;
int len = strlen(item);
if (list == NULL)
return FALSE;
for (p = strstr(list, item); p != NULL; p = strstr(p + 1, item)) {
if ((p == list || isspace((unsigned char)p[-1]) || p[-1] == ',') &&
(p[len] == '\0' || isspace((unsigned char)p[len]) ||
p[len] == ','))
return TRUE;
}
return FALSE;
}
/*
* Check whether the request satisfies the conditions for generating a referral
* TGT. The caller checks whether the hostname component looks like a FQDN.
*/
static krb5_boolean
is_referral_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request)
{
krb5_boolean ret = FALSE;
char *stype = NULL;
char *hostbased = kdc_active_realm->realm_hostbased;
char *no_referral = kdc_active_realm->realm_no_referral;
if (!(request->kdc_options & KDC_OPT_CANONICALIZE))
return FALSE;
if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY)
return FALSE;
if (krb5_princ_size(kdc_context, request->server) != 2)
return FALSE;
stype = data2string(krb5_princ_component(kdc_context, request->server, 0));
if (stype == NULL)
return FALSE;
switch (krb5_princ_type(kdc_context, request->server)) {
case KRB5_NT_UNKNOWN:
/* Allow referrals for NT-UNKNOWN principals, if configured. */
if (!in_list(hostbased, stype) && !in_list(hostbased, "*"))
goto cleanup;
/* FALLTHROUGH */
case KRB5_NT_SRV_HST:
case KRB5_NT_SRV_INST:
/* Deny referrals for specific service types, if configured. */
if (in_list(no_referral, stype) || in_list(no_referral, "*"))
goto cleanup;
ret = TRUE;
break;
default:
goto cleanup;
}
cleanup:
free(stype);
return ret;
}
/*
* Find a remote realm TGS principal for an unknown host-based service
* principal.
*/
static krb5_int32
find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request,
krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
char **realms = NULL, *hostname = NULL;
krb5_data srealm = request->server->realm;
if (!is_referral_req(kdc_active_realm, request))
goto cleanup;
hostname = data2string(krb5_princ_component(kdc_context,
request->server, 1));
if (hostname == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* If the hostname doesn't contain a '.', it's not a FQDN. */
if (strchr(hostname, '.') == NULL)
goto cleanup;
retval = krb5_get_host_realm(kdc_context, hostname, &realms);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
/* Don't return a referral to the empty realm or the service realm. */
if (realms == NULL || realms[0] == NULL || *realms[0] == '\0' ||
data_eq_string(srealm, realms[0])) {
retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto cleanup;
}
retval = krb5_build_principal(kdc_context, krbtgt_princ,
srealm.length, srealm.data,
"krbtgt", realms[0], (char *)0);
cleanup:
krb5_free_host_realm(kdc_context, realms);
free(hostname);
return retval;
}
static krb5_error_code
db_get_svc_princ(krb5_context ctx, krb5_principal princ,
krb5_flags flags, krb5_db_entry **server,
const char **status)
{
krb5_error_code ret;
ret = krb5_db_get_principal(ctx, princ, flags, server);
if (ret == KRB5_KDB_CANTLOCK_DB)
ret = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (ret != 0) {
*status = "LOOKING_UP_SERVER";
}
return ret;
}
static krb5_error_code
search_sprinc(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server, const char **status)
{
krb5_error_code ret;
krb5_principal princ = req->server;
krb5_principal reftgs = NULL;
krb5_boolean allow_referral;
/* Do not allow referrals for u2u or ticket modification requests, because
* the server is supposed to match an already-issued ticket. */
allow_referral = !(req->kdc_options & NO_REFERRAL_OPTION);
if (!allow_referral)
flags &= ~KRB5_KDB_FLAG_CANONICALIZE;
ret = db_get_svc_princ(kdc_context, princ, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY || !allow_referral)
goto cleanup;
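/* Not found locally. If the request is not already for a cross-realm TGS,
* try mapping it to a referral TGS for the host's realm; failing that,
* walk the realm tree for an alternate TGS. */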
if (!is_cross_tgs_principal(req->server)) {
ret = find_referral_tgs(kdc_active_realm, req, &reftgs);
if (ret != 0)
goto cleanup;
ret = db_get_svc_princ(kdc_context, reftgs, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY)
goto cleanup;
princ = reftgs;
}
ret = find_alternate_tgs(kdc_active_realm, princ, server, status);
cleanup:
if (ret != 0 && ret != KRB5KDC_ERR_SVC_UNAVAILABLE) {
ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
if (*status == NULL)
*status = "LOOKING_UP_SERVER";
}
krb5_free_principal(kdc_context, reftgs);
return ret;
}
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */
/*
* Copyright 1990, 1991, 2001, 2007, 2008, 2009, 2013, 2014 by the
* Massachusetts Institute of Technology. All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "k5-int.h"
#include <syslog.h>
#ifdef HAVE_NETINET_IN_H
#include <sys/types.h>
#include <netinet/in.h>
#ifndef hpux
#include <arpa/inet.h>
#endif
#endif
#include "kdc_util.h"
#include "kdc_audit.h"
#include "policy.h"
#include "extern.h"
#include "adm_proto.h"
#include <ctype.h>
static krb5_error_code
find_alternate_tgs(kdc_realm_t *, krb5_principal, krb5_db_entry **,
const char**);
static krb5_error_code
prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int,
krb5_principal,krb5_data **,const char *, krb5_pa_data **);
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **,
const char **);
static krb5_error_code
gen_session_key(kdc_realm_t *, krb5_kdc_req *, krb5_db_entry *,
krb5_keyblock *, const char **);
static krb5_int32
find_referral_tgs(kdc_realm_t *, krb5_kdc_req *, krb5_principal *);
static krb5_error_code
db_get_svc_princ(krb5_context, krb5_principal, krb5_flags,
krb5_db_entry **, const char **);
static krb5_error_code
search_sprinc(kdc_realm_t *, krb5_kdc_req *, krb5_flags,
krb5_db_entry **, const char **);
/*ARGSUSED*/
krb5_error_code
process_tgs_req(struct server_handle *handle, krb5_data *pkt,
const krb5_fulladdr *from, krb5_data **response)
{
krb5_keyblock * subkey = 0;
krb5_keyblock *header_key = NULL;
krb5_kdc_req *request = 0;
krb5_db_entry *server = NULL;
krb5_db_entry *stkt_server = NULL;
krb5_kdc_rep reply;
krb5_enc_kdc_rep_part reply_encpart;
krb5_ticket ticket_reply, *header_ticket = 0;
int st_idx = 0;
krb5_enc_tkt_part enc_tkt_reply;
int newtransited = 0;
krb5_error_code retval = 0;
krb5_keyblock encrypting_key;
krb5_timestamp kdc_time, authtime = 0;
krb5_keyblock session_key;
krb5_keyblock *reply_key = NULL;
krb5_key_data *server_key;
krb5_principal cprinc = NULL, sprinc = NULL, altcprinc = NULL;
krb5_last_req_entry *nolrarray[2], nolrentry;
int errcode;
const char *status = 0;
krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */
krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */
krb5_db_entry *client = NULL, *header_server = NULL;
krb5_db_entry *local_tgt, *local_tgt_storage = NULL;
krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */
krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */
unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */
krb5_boolean is_referral;
const char *emsg = NULL;
krb5_kvno ticket_kvno = 0;
struct kdc_request_state *state = NULL;
krb5_pa_data *pa_tgs_req; /*points into request*/
krb5_data scratch;
krb5_pa_data **e_data = NULL;
kdc_realm_t *kdc_active_realm = NULL;
krb5_audit_state *au_state = NULL;
krb5_data **auth_indicators = NULL;
memset(&reply, 0, sizeof(reply));
memset(&reply_encpart, 0, sizeof(reply_encpart));
memset(&ticket_reply, 0, sizeof(ticket_reply));
memset(&enc_tkt_reply, 0, sizeof(enc_tkt_reply));
session_key.contents = NULL;
retval = decode_krb5_tgs_req(pkt, &request);
if (retval)
return retval;
/* Save pointer to client-requested service principal, in case of
* errors before a successful call to search_sprinc(). */
sprinc = request->server;
if (request->msg_type != KRB5_TGS_REQ) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5_BADMSGTYPE;
}
/*
* setup_server_realm() sets up the global realm-specific data pointer.
*/
kdc_active_realm = setup_server_realm(handle, request->server);
if (kdc_active_realm == NULL) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return KRB5KDC_ERR_WRONG_REALM;
}
errcode = kdc_make_rstate(kdc_active_realm, &state);
if (errcode != 0) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return errcode;
}
/* Initialize audit state. */
errcode = kau_init_kdc_req(kdc_context, request, from, &au_state);
if (errcode) {
krb5_free_kdc_req(handle->kdc_err_context, request);
return errcode;
}
/* Seed the audit trail with the request ID and basic information. */
kau_tgs_req(kdc_context, TRUE, au_state);
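/* Authenticate the AP-REQ in the PA-TGS-REQ padata. On success this yields
* the header ticket (normally a TGT), the DB entry for its server, the key
* that decrypted it, and the authenticator subkey, if any. */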
errcode = kdc_process_tgs_req(kdc_active_realm,
request, from, pkt, &header_ticket,
&header_server, &header_key, &subkey,
&pa_tgs_req);
if (header_ticket && header_ticket->enc_part2)
cprinc = header_ticket->enc_part2->client;
if (errcode) {
status = "PROCESS_TGS";
goto cleanup;
}
if (!header_ticket) {
errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? */
status="UNEXPECTED NULL in header_ticket";
goto cleanup;
}
errcode = kau_make_tkt_id(kdc_context, header_ticket,
&au_state->tkt_in_id);
if (errcode) {
status = "GENERATE_TICKET_ID";
goto cleanup;
}
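/* Unwrap FAST, if used; kdc_find_fast() may replace the request with the
* protected inner request body. */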
scratch.length = pa_tgs_req->length;
scratch.data = (char *) pa_tgs_req->contents;
errcode = kdc_find_fast(&request, &scratch, subkey,
header_ticket->enc_part2->session, state, NULL);
/* Reset sprinc because kdc_find_fast() can replace request. */
sprinc = request->server;
if (errcode != 0) {
status = "FIND_FAST";
goto cleanup;
}
errcode = get_local_tgt(kdc_context, &sprinc->realm, header_server,
&local_tgt, &local_tgt_storage);
if (errcode) {
status = "GET_LOCAL_TGT";
goto cleanup;
}
/* Ignore (for now) the request modification due to FAST processing. */
au_state->request = request;
/*
* Pointer to the encrypted part of the header ticket, which may be
* replaced to point to the encrypted part of the evidence ticket
* if constrained delegation is used. This reduces the number of
* special cases for constrained delegation.
*/
header_enc_tkt = header_ticket->enc_part2;
/*
* We've already dealt with the AP_REQ authentication, so we can
* use header_ticket freely. The encrypted part (if any) has been
* decrypted with the session key.
*/
au_state->stage = SRVC_PRINC;
/* XXX make sure server here has the proper realm...taken from AP_REQ
header? */
setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK);
if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) {
setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE);
setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE);
}
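/* Look up the requested server principal; search_sprinc() may substitute
* a referral or alternate cross-realm TGS entry if the exact name is not
* found locally (see its definition below). */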
errcode = search_sprinc(kdc_active_realm, request, s_flags, &server,
&status);
if (errcode != 0)
goto cleanup;
sprinc = server->princ;
/* If we got a cross-realm TGS which is not the requested server, we are
* issuing a referral (or alternate TGT, which we treat similarly). */
is_referral = is_cross_tgs_principal(server->princ) &&
!krb5_principal_compare(kdc_context, request->server, server->princ);
au_state->stage = VALIDATE_POL;
if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) {
status = "TIME_OF_DAY";
goto cleanup;
}
if ((retval = validate_tgs_request(kdc_active_realm,
request, *server, header_ticket,
kdc_time, &status, &e_data))) {
if (!status)
status = "UNKNOWN_REASON";
if (retval == KDC_ERR_POLICY || retval == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
errcode = retval + ERROR_TABLE_BASE_krb5;
goto cleanup;
}
if (!is_local_principal(kdc_active_realm, header_enc_tkt->client))
setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM);
/* Check for protocol transition */
errcode = kdc_process_s4u2self_req(kdc_active_realm,
request,
header_enc_tkt->client,
server,
subkey,
header_enc_tkt->session,
kdc_time,
&s4u_x509_user,
&client,
&status);
if (s4u_x509_user != NULL || errcode != 0) {
if (s4u_x509_user != NULL)
au_state->s4u2self_user = s4u_x509_user->user_id.user;
if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
au_state->status = status;
kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state);
au_state->s4u2self_user = NULL;
}
if (errcode)
goto cleanup;
if (s4u_x509_user != NULL) {
setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION);
if (is_referral) {
/* The requesting server appears to no longer exist, and we found
* a referral instead. Treat this as a server lookup failure. */
errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
status = "LOOKING_UP_SERVER";
goto cleanup;
}
}
/* Deal with user-to-user and constrained delegation */
errcode = decrypt_2ndtkt(kdc_active_realm, request, c_flags,
&stkt_server, &status);
if (errcode)
goto cleanup;
if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) {
/* Do constrained delegation protocol and authorization checks */
errcode = kdc_process_s4u2proxy_req(kdc_active_realm,
request,
request->second_ticket[st_idx]->enc_part2,
stkt_server,
header_ticket->enc_part2->client,
request->server,
&status);
if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION)
au_state->violation = PROT_CONSTRAINT;
else if (errcode)
au_state->violation = LOCAL_POLICY;
au_state->status = status;
retval = kau_make_tkt_id(kdc_context, request->second_ticket[st_idx],
&au_state->evid_tkt_id);
if (retval) {
status = "GENERATE_TICKET_ID";
errcode = retval;
goto cleanup;
}
kau_s4u2proxy(kdc_context, errcode ? FALSE : TRUE, au_state);
if (errcode)
goto cleanup;
setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION);
assert(krb5_is_tgs_principal(header_ticket->server));
assert(client == NULL); /* assured by kdc_process_s4u2self_req() */
client = stkt_server;
stkt_server = NULL;
} else if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
krb5_db_free_principal(kdc_context, stkt_server);
stkt_server = NULL;
} else
assert(stkt_server == NULL);
au_state->stage = ISSUE_TKT;
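/* Pick a session key enctype acceptable to the requester (or dictated by
* the second ticket for user-to-user) and generate a random session key
* for the new ticket; see gen_session_key() below. */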
errcode = gen_session_key(kdc_active_realm, request, server, &session_key,
&status);
if (errcode)
goto cleanup;
/*
* subject_tkt will refer to the evidence ticket (for constrained
* delegation) or the TGT. The distinction from header_enc_tkt is
* necessary because the TGS signature only protects some fields:
* the others could be forged by a malicious server.
*/
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION))
subject_tkt = request->second_ticket[st_idx]->enc_part2;
else
subject_tkt = header_enc_tkt;
authtime = subject_tkt->times.authtime;
/* Extract auth indicators from the subject ticket, except for S4U2Proxy
* requests (where the client didn't authenticate). */
if (s4u_x509_user == NULL) {
errcode = get_auth_indicators(kdc_context, subject_tkt, local_tgt,
&auth_indicators);
if (errcode) {
status = "GET_AUTH_INDICATORS";
goto cleanup;
}
}
errcode = check_indicators(kdc_context, server, auth_indicators);
if (errcode) {
status = "HIGHER_AUTHENTICATION_REQUIRED";
goto cleanup;
}
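/* For a referral or alternate TGT, issue the ticket in the name of the
* cross-realm TGS entry we found; otherwise echo the requested server. */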
if (is_referral)
ticket_reply.server = server->princ;
else
ticket_reply.server = request->server; /* XXX careful for realm... */
enc_tkt_reply.flags = OPTS2FLAGS(request->kdc_options);
enc_tkt_reply.flags |= COPY_TKT_FLAGS(header_enc_tkt->flags);
enc_tkt_reply.times.starttime = 0;
if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE))
setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE);
/* Indicate support for encrypted padata (RFC 6806). */
setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP);
/* don't use new addresses unless forwarded, see below */
enc_tkt_reply.caddrs = header_enc_tkt->caddrs;
/* noaddrarray[0] = 0; */
reply_encpart.caddrs = 0;/* optional...don't put it in */
reply_encpart.enc_padata = NULL;
/*
* It should be noted that local policy may affect the
* processing of any of these flags. For example, some
* realms may refuse to issue renewable tickets
*/
if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) {
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
/*
* If S4U2Self principal is not forwardable, then mark ticket as
* unforwardable. This behaviour matches Windows, but differs from
* the MIT AS-REQ path, which returns an error
* (KDC_ERR_POLICY) if forwardable tickets cannot be issued.
*
* Consider this block the S4U2Self equivalent to
* validate_forwardable().
*/
if (client != NULL &&
isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* Forwardable flag is propagated along referral path.
*/
else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
/*
* OK_TO_AUTH_AS_DELEGATE must be set on the service requesting
* S4U2Self in order for forwardable tickets to be returned.
*/
else if (!is_referral &&
!isflagset(server->attributes,
KRB5_KDB_OK_TO_AUTH_AS_DELEGATE))
clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE);
}
}
if (isflagset(request->kdc_options, KDC_OPT_FORWARDED) ||
isflagset(request->kdc_options, KDC_OPT_PROXY)) {
/* include new addresses in ticket & reply */
enc_tkt_reply.caddrs = request->addresses;
reply_encpart.caddrs = request->addresses;
}
/* We don't currently handle issuing anonymous tickets based on
* non-anonymous ones, so just ignore the option. */
if (isflagset(request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS) &&
!isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS))
clear(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS);
if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) {
setflag(enc_tkt_reply.flags, TKT_FLG_INVALID);
enc_tkt_reply.times.starttime = request->from;
} else
enc_tkt_reply.times.starttime = kdc_time;
if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) {
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
clear(enc_tkt_reply.flags, TKT_FLG_INVALID);
}
if (isflagset(request->kdc_options, KDC_OPT_RENEW)) {
krb5_timestamp old_starttime;
krb5_deltat old_life;
assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0);
/* BEWARE of allocation hanging off of ticket & enc_part2, it belongs
to the caller */
ticket_reply = *(header_ticket);
enc_tkt_reply = *(header_ticket->enc_part2);
enc_tkt_reply.authorization_data = NULL;
old_starttime = enc_tkt_reply.times.starttime ?
enc_tkt_reply.times.starttime : enc_tkt_reply.times.authtime;
old_life = ts_delta(enc_tkt_reply.times.endtime, old_starttime);
enc_tkt_reply.times.starttime = kdc_time;
enc_tkt_reply.times.endtime =
ts_min(header_ticket->enc_part2->times.renew_till,
ts_incr(kdc_time, old_life));
} else {
/* not a renew request */
enc_tkt_reply.times.starttime = kdc_time;
kdc_get_ticket_endtime(kdc_active_realm, enc_tkt_reply.times.starttime,
header_enc_tkt->times.endtime, request->till,
client, server, &enc_tkt_reply.times.endtime);
}
kdc_get_ticket_renewtime(kdc_active_realm, request, header_enc_tkt, client,
server, &enc_tkt_reply);
/*
* Set authtime to be the same as header or evidence ticket's
*/
enc_tkt_reply.times.authtime = authtime;
/* starttime is optional, and treated as authtime if not present,
so we can nuke it if it matches */
if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime)
enc_tkt_reply.times.starttime = 0;
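/* Record the alternate client principal used for logging: the impersonated
* user for S4U2Self, or the evidence ticket's client for constrained
* delegation. */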
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) {
altcprinc = s4u_x509_user->user_id.user;
} else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
altcprinc = subject_tkt->client;
} else {
altcprinc = NULL;
}
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
encrypting_key = *(t2enc->session);
} else {
/*
* Find the server key
*/
if ((errcode = krb5_dbe_find_enctype(kdc_context, server,
-1, /* ignore keytype */
-1, /* Ignore salttype */
0, /* Get highest kvno */
&server_key))) {
status = "FINDING_SERVER_KEY";
goto cleanup;
}
/*
* Convert server.key into a real key
* (it may be encrypted in the database)
*/
if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL,
server_key, &encrypting_key,
NULL))) {
status = "DECRYPT_SERVER_KEY";
goto cleanup;
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) {
/*
* Don't allow authorization data to be disabled if constrained
* delegation is requested. We don't want to deny the server
* the ability to validate that delegation was used.
*/
clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED);
}
if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) {
/*
* If we are not doing protocol transition/constrained delegation
* try to lookup the client principal so plugins can add additional
* authorization information.
*
* Always validate authorization data for constrained delegation
* because we must validate the KDC signatures.
*/
if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) {
/* Generate authorization data so we can include it in ticket */
setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC);
/* Map principals from foreign (possibly non-AD) realms */
setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS);
assert(client == NULL); /* should not have been set already */
errcode = krb5_db_get_principal(kdc_context, subject_tkt->client,
c_flags, &client);
}
}
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
!isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM))
enc_tkt_reply.client = s4u_x509_user->user_id.user;
else
enc_tkt_reply.client = subject_tkt->client;
enc_tkt_reply.session = &session_key;
enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */
/*
* Only add the realm of the presented tgt to the transited list if
* it is different than the local realm (cross-realm) and it is different
* than the realm of the client (since the realm of the client is already
* implicitly part of the transited list and should not be explicitly
* listed).
*/
/* realm compare is like strcmp, but knows how to deal with these args */
if (krb5_realm_compare(kdc_context, header_ticket->server, tgs_server) ||
krb5_realm_compare(kdc_context, header_ticket->server,
enc_tkt_reply.client)) {
/* tgt issued by local realm or issued by realm of client */
enc_tkt_reply.transited = header_enc_tkt->transited;
} else {
/* tgt issued by some other realm and not the realm of the client */
/* assemble new transited field into allocated storage */
if (header_enc_tkt->transited.tr_type !=
KRB5_DOMAIN_X500_COMPRESS) {
status = "VALIDATE_TRANSIT_TYPE";
errcode = KRB5KDC_ERR_TRTYPE_NOSUPP;
goto cleanup;
}
memset(&enc_tkt_reply.transited, 0, sizeof(enc_tkt_reply.transited));
enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS;
if ((errcode =
add_to_transited(&header_enc_tkt->transited.tr_contents,
&enc_tkt_reply.transited.tr_contents,
header_ticket->server,
enc_tkt_reply.client,
request->server))) {
status = "ADD_TO_TRANSITED_LIST";
goto cleanup;
}
newtransited = 1;
}
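/* For clients from another realm, apply local policy to the inter-realm
* path before the transited-encoding check below; failures are reported
* as NON_TRANSITIVE. */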
if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) {
errcode = validate_transit_path(kdc_context, header_enc_tkt->client,
server, header_server);
if (errcode) {
status = "NON_TRANSITIVE";
goto cleanup;
}
}
if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) {
errcode = kdc_check_transited_list (kdc_active_realm,
&enc_tkt_reply.transited.tr_contents,
krb5_princ_realm (kdc_context, header_enc_tkt->client),
krb5_princ_realm (kdc_context, request->server));
if (errcode == 0) {
setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED);
} else {
log_tgs_badtrans(kdc_context, cprinc, sprinc,
&enc_tkt_reply.transited.tr_contents, errcode);
}
} else
krb5_klog_syslog(LOG_INFO, _("not checking transit path"));
if (kdc_active_realm->realm_reject_bad_transit &&
!isflagset(enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) {
errcode = KRB5KDC_ERR_POLICY;
status = "BAD_TRANSIT";
au_state->violation = LOCAL_POLICY;
goto cleanup;
}
errcode = handle_authdata(kdc_context, c_flags, client, server,
header_server, local_tgt,
subkey != NULL ? subkey :
header_ticket->enc_part2->session,
&encrypting_key, /* U2U or server key */
header_key,
pkt,
request,
s4u_x509_user ?
s4u_x509_user->user_id.user : NULL,
subject_tkt,
auth_indicators,
&enc_tkt_reply);
if (errcode) {
krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"),
errcode);
status = "HANDLE_AUTHDATA";
goto cleanup;
}
ticket_reply.enc_part2 = &enc_tkt_reply;
/*
* If we are doing user-to-user authentication, then make sure
* that the client for the second ticket matches the request
* server, and then encrypt the ticket using the session key of
* the second ticket.
*/
if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) {
/*
* Make sure the client for the second ticket matches
* requested server.
*/
krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2;
krb5_principal client2 = t2enc->client;
if (!krb5_principal_compare(kdc_context, request->server, client2)) {
altcprinc = client2;
errcode = KRB5KDC_ERR_SERVER_NOMATCH;
status = "2ND_TKT_MISMATCH";
au_state->status = status;
kau_u2u(kdc_context, FALSE, au_state);
goto cleanup;
}
ticket_kvno = 0;
ticket_reply.enc_part.enctype = t2enc->session->enctype;
kau_u2u(kdc_context, TRUE, au_state);
st_idx++;
} else {
ticket_kvno = server_key->key_data_kvno;
}
errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key,
&ticket_reply);
if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY))
krb5_free_keyblock_contents(kdc_context, &encrypting_key);
if (errcode) {
status = "ENCRYPT_TICKET";
goto cleanup;
}
ticket_reply.enc_part.kvno = ticket_kvno;
/* Start assembling the response */
au_state->stage = ENCR_REP;
reply.msg_type = KRB5_TGS_REP;
if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) &&
krb5int_find_pa_data(kdc_context, request->padata,
KRB5_PADATA_S4U_X509_USER) != NULL) {
errcode = kdc_make_s4u2self_rep(kdc_context,
subkey,
header_ticket->enc_part2->session,
s4u_x509_user,
&reply,
&reply_encpart);
if (errcode) {
status = "MAKE_S4U2SELF_PADATA";
au_state->status = status;
}
kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state);
if (errcode)
goto cleanup;
}
reply.client = enc_tkt_reply.client;
reply.enc_part.kvno = 0;/* We are using the session key */
reply.ticket = &ticket_reply;
reply_encpart.session = &session_key;
reply_encpart.nonce = request->nonce;
/* copy the time fields */
reply_encpart.times = enc_tkt_reply.times;
nolrentry.lr_type = KRB5_LRQ_NONE;
nolrentry.value = 0;
nolrentry.magic = 0;
nolrarray[0] = &nolrentry;
nolrarray[1] = 0;
reply_encpart.last_req = nolrarray; /* not available for TGS reqs */
reply_encpart.key_exp = 0;/* ditto */
reply_encpart.flags = enc_tkt_reply.flags;
reply_encpart.server = ticket_reply.server;
/* use the session key in the ticket, unless there's a subsession key
in the AP_REQ */
reply.enc_part.enctype = subkey ? subkey->enctype :
header_ticket->enc_part2->session->enctype;
errcode = kdc_fast_response_handle_padata(state, request, &reply,
subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype);
if (errcode != 0) {
status = "MAKE_FAST_RESPONSE";
goto cleanup;
}
errcode = kdc_fast_handle_reply_key(state,
subkey ? subkey : header_ticket->enc_part2->session, &reply_key);
if (errcode) {
status = "MAKE_FAST_REPLY_KEY";
goto cleanup;
}
errcode = return_enc_padata(kdc_context, pkt, request,
reply_key, server, &reply_encpart,
is_referral &&
isflagset(s_flags,
KRB5_KDB_FLAG_CANONICALIZE));
if (errcode) {
status = "KDC_RETURN_ENC_PADATA";
goto cleanup;
}
errcode = kau_make_tkt_id(kdc_context, &ticket_reply, &au_state->tkt_out_id);
if (errcode) {
status = "GENERATE_TICKET_ID";
goto cleanup;
}
if (kdc_fast_hide_client(state))
reply.client = (krb5_principal)krb5_anonymous_principal();
errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart,
subkey ? 1 : 0,
reply_key,
&reply, response);
if (errcode) {
status = "ENCODE_KDC_REP";
} else {
status = "ISSUE";
}
memset(ticket_reply.enc_part.ciphertext.data, 0,
ticket_reply.enc_part.ciphertext.length);
free(ticket_reply.enc_part.ciphertext.data);
/* these parts are left on as a courtesy from krb5_encode_kdc_rep so we
can use them in raw form if needed. But, we don't... */
memset(reply.enc_part.ciphertext.data, 0,
reply.enc_part.ciphertext.length);
free(reply.enc_part.ciphertext.data);
cleanup:
if (status == NULL)
status = "UNKNOWN_REASON";
if (reply_key)
krb5_free_keyblock(kdc_context, reply_key);
if (errcode)
emsg = krb5_get_error_message (kdc_context, errcode);
au_state->status = status;
if (!errcode)
au_state->reply = &reply;
kau_tgs_req(kdc_context, errcode ? FALSE : TRUE, au_state);
kau_free_kdc_req(au_state);
log_tgs_req(kdc_context, from, request, &reply, cprinc,
sprinc, altcprinc, authtime,
c_flags, status, errcode, emsg);
if (errcode) {
krb5_free_error_message (kdc_context, emsg);
emsg = NULL;
}
if (errcode) {
int got_err = 0;
if (status == 0) {
status = krb5_get_error_message (kdc_context, errcode);
got_err = 1;
}
errcode -= ERROR_TABLE_BASE_krb5;
if (errcode < 0 || errcode > KRB_ERR_MAX)
errcode = KRB_ERR_GENERIC;
retval = prepare_error_tgs(state, request, header_ticket, errcode,
(server != NULL) ? server->princ : NULL,
response, status, e_data);
if (got_err) {
krb5_free_error_message (kdc_context, status);
status = 0;
}
}
if (header_ticket != NULL)
krb5_free_ticket(kdc_context, header_ticket);
if (request != NULL)
krb5_free_kdc_req(kdc_context, request);
if (state)
kdc_free_rstate(state);
krb5_db_free_principal(kdc_context, server);
krb5_db_free_principal(kdc_context, stkt_server);
krb5_db_free_principal(kdc_context, header_server);
krb5_db_free_principal(kdc_context, client);
krb5_db_free_principal(kdc_context, local_tgt_storage);
if (session_key.contents != NULL)
krb5_free_keyblock_contents(kdc_context, &session_key);
if (newtransited)
free(enc_tkt_reply.transited.tr_contents.data);
if (s4u_x509_user != NULL)
krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user);
if (kdc_issued_auth_data != NULL)
krb5_free_authdata(kdc_context, kdc_issued_auth_data);
if (subkey != NULL)
krb5_free_keyblock(kdc_context, subkey);
if (header_key != NULL)
krb5_free_keyblock(kdc_context, header_key);
if (reply.padata)
krb5_free_pa_data(kdc_context, reply.padata);
if (reply_encpart.enc_padata)
krb5_free_pa_data(kdc_context, reply_encpart.enc_padata);
if (enc_tkt_reply.authorization_data != NULL)
krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data);
krb5_free_pa_data(kdc_context, e_data);
k5_free_data_ptr_list(auth_indicators);
return retval;
}
static krb5_error_code
prepare_error_tgs (struct kdc_request_state *state,
krb5_kdc_req *request, krb5_ticket *ticket, int error,
krb5_principal canon_server,
krb5_data **response, const char *status,
krb5_pa_data **e_data)
{
krb5_error errpkt;
krb5_error_code retval = 0;
krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL;
kdc_realm_t *kdc_active_realm = state->realm_data;
errpkt.magic = KV5M_ERROR;
errpkt.ctime = request->nonce;
errpkt.cusec = 0;
if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime,
&errpkt.susec)))
return(retval);
errpkt.error = error;
errpkt.server = request->server;
if (ticket && ticket->enc_part2)
errpkt.client = ticket->enc_part2->client;
else
errpkt.client = NULL;
errpkt.text.length = strlen(status);
if (!(errpkt.text.data = strdup(status)))
return ENOMEM;
if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) {
free(errpkt.text.data);
return ENOMEM;
}
if (e_data != NULL) {
retval = encode_krb5_padata_sequence(e_data, &e_data_asn1);
if (retval) {
free(scratch);
free(errpkt.text.data);
return retval;
}
errpkt.e_data = *e_data_asn1;
} else
errpkt.e_data = empty_data();
retval = kdc_fast_handle_error(kdc_context, state, request, e_data,
&errpkt, &fast_edata);
if (retval) {
free(scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
return retval;
}
if (fast_edata)
errpkt.e_data = *fast_edata;
if (kdc_fast_hide_client(state) && errpkt.client != NULL)
errpkt.client = (krb5_principal)krb5_anonymous_principal();
retval = krb5_mk_error(kdc_context, &errpkt, scratch);
free(errpkt.text.data);
krb5_free_data(kdc_context, e_data_asn1);
krb5_free_data(kdc_context, fast_edata);
if (retval)
free(scratch);
else
*response = scratch;
return retval;
}
/* KDC options that require a second ticket */
#define STKT_OPTIONS (KDC_OPT_CNAME_IN_ADDL_TKT | KDC_OPT_ENC_TKT_IN_SKEY)
/*
* Get the key for the second ticket, if any, and decrypt it.
*/
static krb5_error_code
decrypt_2ndtkt(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server_out,
const char **status)
{
krb5_error_code retval;
krb5_db_entry *server = NULL;
krb5_keyblock *key;
krb5_kvno kvno;
krb5_ticket *stkt;
if (!(req->kdc_options & STKT_OPTIONS))
return 0;
stkt = req->second_ticket[0];
retval = kdc_get_server_key(kdc_context, stkt,
flags,
TRUE, /* match_enctype */
&server,
&key,
&kvno);
if (retval != 0) {
*status = "2ND_TKT_SERVER";
goto cleanup;
}
retval = krb5_decrypt_tkt_part(kdc_context, key,
req->second_ticket[0]);
krb5_free_keyblock(kdc_context, key);
if (retval != 0) {
*status = "2ND_TKT_DECRYPT";
goto cleanup;
}
*server_out = server;
server = NULL;
cleanup:
krb5_db_free_principal(kdc_context, server);
return retval;
}
static krb5_error_code
get_2ndtkt_enctype(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_enctype *useenctype, const char **status)
{
krb5_enctype etype;
krb5_ticket *stkt = req->second_ticket[0];
int i;
etype = stkt->enc_part2->session->enctype;
if (!krb5_c_valid_enctype(etype)) {
*status = "BAD_ETYPE_IN_2ND_TKT";
return KRB5KDC_ERR_ETYPE_NOSUPP;
}
for (i = 0; i < req->nktypes; i++) {
if (req->ktype[i] == etype) {
*useenctype = etype;
break;
}
}
return 0;
}
static krb5_error_code
gen_session_key(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_db_entry *server, krb5_keyblock *skey,
const char **status)
{
krb5_error_code retval;
krb5_enctype useenctype = 0;
/*
     * Some special care needs to be taken in the user-to-user case,
     * since we don't know what key types the application server doing
     * user-to-user authentication can support. We
* know that it at least must be able to support the encryption
* type of the session key in the TGT, since otherwise it won't be
* able to decrypt the U2U ticket! So we use that in preference
* to anything else.
*/
if (req->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) {
retval = get_2ndtkt_enctype(kdc_active_realm, req, &useenctype,
status);
if (retval != 0)
goto cleanup;
}
if (useenctype == 0) {
useenctype = select_session_keytype(kdc_active_realm, server,
req->nktypes,
req->ktype);
}
if (useenctype == 0) {
/* unsupported ktype */
*status = "BAD_ENCRYPTION_TYPE";
retval = KRB5KDC_ERR_ETYPE_NOSUPP;
goto cleanup;
}
retval = krb5_c_make_random_key(kdc_context, useenctype, skey);
if (retval != 0) {
/* random key failed */
*status = "MAKE_RANDOM_KEY";
goto cleanup;
}
cleanup:
return retval;
}
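/*
 * E.g. (illustrative): for a u2u request whose second ticket carries an
 * aes256-cts session key, gen_session_key() picks aes256-cts as long as the
 * client also offered it in req->ktype, regardless of the server
 * principal's usual enctype preferences.
 */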
/*
* The request seems to be for a ticket-granting service somewhere else,
* but we don't have a ticket for the final TGS. Try to give the requestor
* some intermediate realm.
*/
static krb5_error_code
find_alternate_tgs(kdc_realm_t *kdc_active_realm, krb5_principal princ,
krb5_db_entry **server_ptr, const char **status)
{
krb5_error_code retval;
krb5_principal *plist = NULL, *pl2;
krb5_data tmp;
krb5_db_entry *server = NULL;
*server_ptr = NULL;
assert(is_cross_tgs_principal(princ));
if ((retval = krb5_walk_realm_tree(kdc_context,
krb5_princ_realm(kdc_context, princ),
krb5_princ_component(kdc_context, princ, 1),
&plist, KRB5_REALM_BRANCH_CHAR))) {
goto cleanup;
}
/* move to the end */
for (pl2 = plist; *pl2; pl2++);
/* the first entry in this array is for krbtgt/local@local, so we
ignore it */
while (--pl2 > plist) {
tmp = *krb5_princ_realm(kdc_context, *pl2);
krb5_princ_set_realm(kdc_context, *pl2,
krb5_princ_realm(kdc_context, princ));
retval = db_get_svc_princ(kdc_context, *pl2, 0, &server, status);
krb5_princ_set_realm(kdc_context, *pl2, &tmp);
if (retval == KRB5_KDB_NOENTRY)
continue;
else if (retval)
goto cleanup;
log_tgs_alt_tgt(kdc_context, server->princ);
*server_ptr = server;
server = NULL;
goto cleanup;
}
cleanup:
if (retval == 0 && *server_ptr == NULL)
retval = KRB5_KDB_NOENTRY;
if (retval != 0)
*status = "UNKNOWN_SERVER";
krb5_free_realm_tree(kdc_context, plist);
krb5_db_free_principal(kdc_context, server);
return retval;
}
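/*
 * Illustrative walk (a sketch; the exact list depends on the realm tree
 * built from [capaths] or the realm hierarchy): a request for krbtgt/C.COM
 * from realm A.COM might yield { krbtgt/A.COM, krbtgt/B.COM, krbtgt/C.COM };
 * the loop above tries entries from most- to least-specific and returns
 * the closest cross-realm TGS entry present in our database.
 */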
/* Return true if item is an element of the space/comma-separated list. */
static krb5_boolean
in_list(const char *list, const char *item)
{
const char *p;
int len = strlen(item);
if (list == NULL)
return FALSE;
for (p = strstr(list, item); p != NULL; p = strstr(p + 1, item)) {
if ((p == list || isspace((unsigned char)p[-1]) || p[-1] == ',') &&
(p[len] == '\0' || isspace((unsigned char)p[len]) ||
p[len] == ','))
return TRUE;
}
return FALSE;
}
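/*
 * Usage sketch: a match must begin and end on a space/comma boundary.
 *
 *   in_list("host, HTTP cifs", "HTTP") -> TRUE
 *   in_list("host, HTTP cifs", "TTP")  -> FALSE  (mid-token)
 *   in_list(NULL, "HTTP")              -> FALSE
 */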
/*
* Check whether the request satisfies the conditions for generating a referral
 * TGT. The caller checks whether the hostname component looks like an FQDN.
*/
static krb5_boolean
is_referral_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request)
{
krb5_boolean ret = FALSE;
char *stype = NULL;
char *hostbased = kdc_active_realm->realm_hostbased;
char *no_referral = kdc_active_realm->realm_no_referral;
if (!(request->kdc_options & KDC_OPT_CANONICALIZE))
return FALSE;
if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY)
return FALSE;
if (krb5_princ_size(kdc_context, request->server) != 2)
return FALSE;
stype = data2string(krb5_princ_component(kdc_context, request->server, 0));
if (stype == NULL)
return FALSE;
switch (krb5_princ_type(kdc_context, request->server)) {
case KRB5_NT_UNKNOWN:
/* Allow referrals for NT-UNKNOWN principals, if configured. */
if (!in_list(hostbased, stype) && !in_list(hostbased, "*"))
goto cleanup;
/* FALLTHROUGH */
case KRB5_NT_SRV_HST:
case KRB5_NT_SRV_INST:
/* Deny referrals for specific service types, if configured. */
if (in_list(no_referral, stype) || in_list(no_referral, "*"))
goto cleanup;
ret = TRUE;
break;
default:
goto cleanup;
}
cleanup:
free(stype);
return ret;
}
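/*
 * Illustrative realm configuration (kdc.conf option names as documented;
 * they populate realm_hostbased and realm_no_referral above):
 *
 *   host_based_services = HTTP cifs
 *   no_referral = nfs
 *
 * With KDC_OPT_CANONICALIZE set, an NT-UNKNOWN HTTP/host request then
 * qualifies for a referral, while an nfs/host request never does.
 */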
/*
* Find a remote realm TGS principal for an unknown host-based service
* principal.
*/
static krb5_int32
find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request,
krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
char **realms = NULL, *hostname = NULL;
krb5_data srealm = request->server->realm;
if (!is_referral_req(kdc_active_realm, request))
goto cleanup;
hostname = data2string(krb5_princ_component(kdc_context,
request->server, 1));
if (hostname == NULL) {
retval = ENOMEM;
goto cleanup;
}
    /* If the hostname doesn't contain a '.', it's not an FQDN. */
if (strchr(hostname, '.') == NULL)
goto cleanup;
retval = krb5_get_host_realm(kdc_context, hostname, &realms);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
/* Don't return a referral to the empty realm or the service realm. */
if (realms == NULL || realms[0] == NULL || *realms[0] == '\0' ||
data_eq_string(srealm, realms[0])) {
retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto cleanup;
}
retval = krb5_build_principal(kdc_context, krbtgt_princ,
srealm.length, srealm.data,
"krbtgt", realms[0], (char *)0);
cleanup:
krb5_free_host_realm(kdc_context, realms);
free(hostname);
return retval;
}
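/*
 * Illustrative result: for a qualifying request for HTTP/www.example.com
 * in realm SREALM, where krb5_get_host_realm() maps www.example.com to
 * UREALM, the principal built above is krbtgt/UREALM@SREALM.
 */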
static krb5_error_code
db_get_svc_princ(krb5_context ctx, krb5_principal princ,
krb5_flags flags, krb5_db_entry **server,
const char **status)
{
krb5_error_code ret;
ret = krb5_db_get_principal(ctx, princ, flags, server);
if (ret == KRB5_KDB_CANTLOCK_DB)
ret = KRB5KDC_ERR_SVC_UNAVAILABLE;
if (ret != 0) {
*status = "LOOKING_UP_SERVER";
}
return ret;
}
static krb5_error_code
search_sprinc(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req,
krb5_flags flags, krb5_db_entry **server, const char **status)
{
krb5_error_code ret;
krb5_principal princ = req->server;
krb5_principal reftgs = NULL;
krb5_boolean allow_referral;
/* Do not allow referrals for u2u or ticket modification requests, because
* the server is supposed to match an already-issued ticket. */
allow_referral = !(req->kdc_options & NO_REFERRAL_OPTION);
if (!allow_referral)
flags &= ~KRB5_KDB_FLAG_CANONICALIZE;
ret = db_get_svc_princ(kdc_context, princ, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY || !allow_referral)
goto cleanup;
if (!is_cross_tgs_principal(req->server)) {
ret = find_referral_tgs(kdc_active_realm, req, &reftgs);
if (ret != 0)
goto cleanup;
ret = db_get_svc_princ(kdc_context, reftgs, flags, server, status);
if (ret == 0 || ret != KRB5_KDB_NOENTRY)
goto cleanup;
princ = reftgs;
}
ret = find_alternate_tgs(kdc_active_realm, princ, server, status);
cleanup:
if (ret != 0 && ret != KRB5KDC_ERR_SVC_UNAVAILABLE) {
ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
if (*status == NULL)
*status = "LOOKING_UP_SERVER";
}
krb5_free_principal(kdc_context, reftgs);
return ret;
}
|
12,422,060,022,640 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/krb/s4u_creds.c */
/*
* Copyright (C) 2009 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
#include "int-proto.h"
/* Convert ticket flags to necessary KDC options */
#define FLAGS2OPTS(flags) (flags & KDC_TKT_COMMON_MASK)
/*
* Implements S4U2Self, by which a service can request a ticket to
* itself on behalf of an arbitrary principal.
*/
static krb5_error_code
krb5_get_as_key_noop(
krb5_context context,
krb5_principal client,
krb5_enctype etype,
krb5_prompter_fct prompter,
void *prompter_data,
krb5_data *salt,
krb5_data *params,
krb5_keyblock *as_key,
void *gak_data,
k5_response_items *ritems)
{
    /* Force a hard error; we don't actually have the key. */
return KRB5_PREAUTH_FAILED;
}
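/*
 * s4u_identify_user() below drives an AS exchange solely to learn the
 * client's canonical name via the KRB5_PADATA_S4U_X509_USER preauth type;
 * the noop callback above guarantees the exchange stops with
 * KRB5_PREAUTH_FAILED before the user's real key is ever needed.
 */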
static krb5_error_code
s4u_identify_user(krb5_context context,
krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_principal *canon_user)
{
krb5_error_code code;
krb5_preauthtype ptypes[1] = { KRB5_PADATA_S4U_X509_USER };
krb5_creds creds;
int use_master = 0;
krb5_get_init_creds_opt *opts = NULL;
krb5_principal_data client;
krb5_s4u_userid userid;
*canon_user = NULL;
if (in_creds->client == NULL && subject_cert == NULL) {
return EINVAL;
}
if (in_creds->client != NULL &&
in_creds->client->type != KRB5_NT_ENTERPRISE_PRINCIPAL) {
int anonymous;
anonymous = krb5_principal_compare(context, in_creds->client,
krb5_anonymous_principal());
return krb5_copy_principal(context,
anonymous ? in_creds->server
: in_creds->client,
canon_user);
}
memset(&creds, 0, sizeof(creds));
memset(&userid, 0, sizeof(userid));
if (subject_cert != NULL)
userid.subject_cert = *subject_cert;
code = krb5_get_init_creds_opt_alloc(context, &opts);
if (code != 0)
goto cleanup;
krb5_get_init_creds_opt_set_tkt_life(opts, 15);
krb5_get_init_creds_opt_set_renew_life(opts, 0);
krb5_get_init_creds_opt_set_forwardable(opts, 0);
krb5_get_init_creds_opt_set_proxiable(opts, 0);
krb5_get_init_creds_opt_set_canonicalize(opts, 1);
krb5_get_init_creds_opt_set_preauth_list(opts, ptypes, 1);
if (in_creds->client != NULL) {
client = *in_creds->client;
client.realm = in_creds->server->realm;
} else {
client.magic = KV5M_PRINCIPAL;
client.realm = in_creds->server->realm;
/* should this be NULL, empty or a fixed string? XXX */
client.data = NULL;
client.length = 0;
client.type = KRB5_NT_ENTERPRISE_PRINCIPAL;
}
code = k5_get_init_creds(context, &creds, &client, NULL, NULL, 0, NULL,
opts, krb5_get_as_key_noop, &userid, &use_master,
NULL);
if (code == 0 || code == KRB5_PREAUTH_FAILED) {
*canon_user = userid.user;
userid.user = NULL;
code = 0;
}
cleanup:
krb5_free_cred_contents(context, &creds);
if (opts != NULL)
krb5_get_init_creds_opt_free(context, opts);
if (userid.user != NULL)
krb5_free_principal(context, userid.user);
return code;
}
static krb5_error_code
make_pa_for_user_checksum(krb5_context context,
krb5_keyblock *key,
krb5_pa_for_user *req,
krb5_checksum *cksum)
{
krb5_error_code code;
int i;
char *p;
krb5_data data;
data.length = 4;
for (i = 0; i < req->user->length; i++)
data.length += req->user->data[i].length;
data.length += req->user->realm.length;
data.length += req->auth_package.length;
p = data.data = malloc(data.length);
if (data.data == NULL)
return ENOMEM;
p[0] = (req->user->type >> 0) & 0xFF;
p[1] = (req->user->type >> 8) & 0xFF;
p[2] = (req->user->type >> 16) & 0xFF;
p[3] = (req->user->type >> 24) & 0xFF;
p += 4;
for (i = 0; i < req->user->length; i++) {
if (req->user->data[i].length > 0)
memcpy(p, req->user->data[i].data, req->user->data[i].length);
p += req->user->data[i].length;
}
if (req->user->realm.length > 0)
memcpy(p, req->user->realm.data, req->user->realm.length);
p += req->user->realm.length;
if (req->auth_package.length > 0)
memcpy(p, req->auth_package.data, req->auth_package.length);
/* Per spec, use hmac-md5 checksum regardless of key type. */
code = krb5_c_make_checksum(context, CKSUMTYPE_HMAC_MD5_ARCFOUR, key,
KRB5_KEYUSAGE_APP_DATA_CKSUM, &data,
cksum);
free(data.data);
return code;
}
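/*
 * Layout of the checksum input assembled above:
 *
 *   bytes 0-3  user->type, little-endian
 *   ...        each user name component, concatenated with no separators
 *   ...        user realm
 *   ...        auth_package ("Kerberos")
 */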
static krb5_error_code
build_pa_for_user(krb5_context context,
krb5_creds *tgt,
krb5_s4u_userid *userid,
krb5_pa_data **out_padata)
{
krb5_error_code code;
krb5_pa_data *padata;
krb5_pa_for_user for_user;
krb5_data *for_user_data = NULL;
char package[] = "Kerberos";
if (userid->user == NULL)
return EINVAL;
memset(&for_user, 0, sizeof(for_user));
for_user.user = userid->user;
for_user.auth_package.data = package;
for_user.auth_package.length = sizeof(package) - 1;
code = make_pa_for_user_checksum(context, &tgt->keyblock,
&for_user, &for_user.cksum);
if (code != 0)
goto cleanup;
code = encode_krb5_pa_for_user(&for_user, &for_user_data);
if (code != 0)
goto cleanup;
padata = malloc(sizeof(*padata));
if (padata == NULL) {
code = ENOMEM;
goto cleanup;
}
padata->magic = KV5M_PA_DATA;
padata->pa_type = KRB5_PADATA_FOR_USER;
padata->length = for_user_data->length;
padata->contents = (krb5_octet *)for_user_data->data;
free(for_user_data);
for_user_data = NULL;
*out_padata = padata;
cleanup:
if (for_user.cksum.contents != NULL)
krb5_free_checksum_contents(context, &for_user.cksum);
krb5_free_data(context, for_user_data);
return code;
}
/*
* This function is invoked by krb5int_make_tgs_request_ext() just before the
* request is encoded; it gives us access to the nonce and subkey without
* requiring them to be generated by the caller.
*/
static krb5_error_code
build_pa_s4u_x509_user(krb5_context context,
krb5_keyblock *subkey,
krb5_kdc_req *tgsreq,
void *gcvt_data)
{
krb5_error_code code;
krb5_pa_s4u_x509_user *s4u_user = (krb5_pa_s4u_x509_user *)gcvt_data;
krb5_data *data = NULL;
krb5_pa_data **padata;
krb5_cksumtype cksumtype;
int i;
assert(s4u_user->cksum.contents == NULL);
s4u_user->user_id.nonce = tgsreq->nonce;
code = encode_krb5_s4u_userid(&s4u_user->user_id, &data);
if (code != 0)
goto cleanup;
/* [MS-SFU] 2.2.2: unusual to say the least, but enc_padata secures it */
if (subkey->enctype == ENCTYPE_ARCFOUR_HMAC ||
subkey->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
cksumtype = CKSUMTYPE_RSA_MD4;
} else {
code = krb5int_c_mandatory_cksumtype(context, subkey->enctype,
&cksumtype);
}
if (code != 0)
goto cleanup;
code = krb5_c_make_checksum(context, cksumtype, subkey,
KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data,
&s4u_user->cksum);
if (code != 0)
goto cleanup;
krb5_free_data(context, data);
data = NULL;
code = encode_krb5_pa_s4u_x509_user(s4u_user, &data);
if (code != 0)
goto cleanup;
assert(tgsreq->padata != NULL);
for (i = 0; tgsreq->padata[i] != NULL; i++)
;
padata = realloc(tgsreq->padata,
(i + 2) * sizeof(krb5_pa_data *));
if (padata == NULL) {
code = ENOMEM;
goto cleanup;
}
tgsreq->padata = padata;
padata[i] = malloc(sizeof(krb5_pa_data));
if (padata[i] == NULL) {
code = ENOMEM;
goto cleanup;
}
padata[i]->magic = KV5M_PA_DATA;
padata[i]->pa_type = KRB5_PADATA_S4U_X509_USER;
padata[i]->length = data->length;
padata[i]->contents = (krb5_octet *)data->data;
padata[i + 1] = NULL;
free(data);
data = NULL;
cleanup:
if (code != 0 && s4u_user->cksum.contents != NULL) {
krb5_free_checksum_contents(context, &s4u_user->cksum);
s4u_user->cksum.contents = NULL;
}
krb5_free_data(context, data);
return code;
}
static krb5_error_code
verify_s4u2self_reply(krb5_context context,
krb5_keyblock *subkey,
krb5_pa_s4u_x509_user *req_s4u_user,
krb5_pa_data **rep_padata,
krb5_pa_data **enc_padata)
{
krb5_error_code code;
krb5_pa_data *rep_s4u_padata, *enc_s4u_padata;
krb5_pa_s4u_x509_user *rep_s4u_user = NULL;
krb5_data data, *datap = NULL;
krb5_keyusage usage;
krb5_boolean valid;
krb5_boolean not_newer;
assert(req_s4u_user != NULL);
switch (subkey->enctype) {
case ENCTYPE_DES_CBC_CRC:
case ENCTYPE_DES_CBC_MD4:
case ENCTYPE_DES_CBC_MD5:
case ENCTYPE_DES3_CBC_SHA1:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
    case ENCTYPE_ARCFOUR_HMAC_EXP:
not_newer = TRUE;
break;
default:
not_newer = FALSE;
break;
}
enc_s4u_padata = krb5int_find_pa_data(context,
enc_padata,
KRB5_PADATA_S4U_X509_USER);
    /* XXX this will break newer enctypes with an MIT 1.7 KDC */
rep_s4u_padata = krb5int_find_pa_data(context,
rep_padata,
KRB5_PADATA_S4U_X509_USER);
if (rep_s4u_padata == NULL) {
if (not_newer == FALSE || enc_s4u_padata != NULL)
return KRB5_KDCREP_MODIFIED;
else
return 0;
}
data.length = rep_s4u_padata->length;
data.data = (char *)rep_s4u_padata->contents;
code = decode_krb5_pa_s4u_x509_user(&data, &rep_s4u_user);
if (code != 0)
goto cleanup;
if (rep_s4u_user->user_id.nonce != req_s4u_user->user_id.nonce) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
code = encode_krb5_s4u_userid(&rep_s4u_user->user_id, &datap);
if (code != 0)
goto cleanup;
if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE)
usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY;
else
usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST;
code = krb5_c_verify_checksum(context, subkey, usage, datap,
&rep_s4u_user->cksum, &valid);
if (code != 0)
goto cleanup;
if (valid == FALSE) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
/*
* KDCs that support KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE also return
* S4U enc_padata for older (pre-AES) encryption types only.
*/
if (not_newer) {
if (enc_s4u_padata == NULL) {
if (rep_s4u_user->user_id.options &
KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
} else {
if (enc_s4u_padata->length !=
req_s4u_user->cksum.length + rep_s4u_user->cksum.length) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
if (memcmp(enc_s4u_padata->contents,
req_s4u_user->cksum.contents,
req_s4u_user->cksum.length) ||
memcmp(&enc_s4u_padata->contents[req_s4u_user->cksum.length],
rep_s4u_user->cksum.contents,
rep_s4u_user->cksum.length)) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
}
} else if (!krb5_c_is_keyed_cksum(rep_s4u_user->cksum.checksum_type)) {
code = KRB5KRB_AP_ERR_INAPP_CKSUM;
goto cleanup;
}
cleanup:
krb5_free_pa_s4u_x509_user(context, rep_s4u_user);
krb5_free_data(context, datap);
return code;
}
/* Unparse princ and re-parse it as an enterprise principal. */
static krb5_error_code
convert_to_enterprise(krb5_context context, krb5_principal princ,
krb5_principal *eprinc_out)
{
krb5_error_code code;
char *str;
*eprinc_out = NULL;
code = krb5_unparse_name(context, princ, &str);
if (code != 0)
return code;
code = krb5_parse_name_flags(context, str,
KRB5_PRINCIPAL_PARSE_ENTERPRISE |
KRB5_PRINCIPAL_PARSE_IGNORE_REALM,
eprinc_out);
krb5_free_unparsed_name(context, str);
return code;
}
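/*
 * E.g. (sketch): a server name such as service/host@SREALM is flattened to
 * text and re-parsed as a single enterprise-name component with no realm;
 * krb5_get_self_cred_from_kdc() then substitutes each hop's TGS realm
 * before asking a foreign realm.
 */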
static krb5_error_code
krb5_get_self_cred_from_kdc(krb5_context context,
krb5_flags options,
krb5_ccache ccache,
krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_data *user_realm,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_principal tgs = NULL, eprinc = NULL;
krb5_principal_data sprinc;
krb5_creds tgtq, s4u_creds, *tgt = NULL, *tgtptr;
krb5_creds *referral_tgts[KRB5_REFERRAL_MAXHOPS];
krb5_pa_s4u_x509_user s4u_user;
int referral_count = 0, i;
krb5_flags kdcopt;
memset(&tgtq, 0, sizeof(tgtq));
memset(referral_tgts, 0, sizeof(referral_tgts));
*out_creds = NULL;
memset(&s4u_user, 0, sizeof(s4u_user));
if (in_creds->client != NULL && in_creds->client->length > 0) {
if (in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
code = krb5_build_principal_ext(context,
&s4u_user.user_id.user,
user_realm->length,
user_realm->data,
in_creds->client->data[0].length,
in_creds->client->data[0].data,
0);
if (code != 0)
goto cleanup;
s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL;
} else {
code = krb5_copy_principal(context,
in_creds->client,
&s4u_user.user_id.user);
if (code != 0)
goto cleanup;
}
} else {
code = krb5_build_principal_ext(context, &s4u_user.user_id.user,
user_realm->length,
user_realm->data);
if (code != 0)
goto cleanup;
s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL;
}
if (subject_cert != NULL)
s4u_user.user_id.subject_cert = *subject_cert;
s4u_user.user_id.options = KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE;
/* First, acquire a TGT to the user's realm. */
code = krb5int_tgtname(context, user_realm, &in_creds->server->realm,
&tgs);
if (code != 0)
goto cleanup;
tgtq.client = in_creds->server;
tgtq.server = tgs;
code = krb5_get_credentials(context, options, ccache, &tgtq, &tgt);
if (code != 0)
goto cleanup;
tgtptr = tgt;
/* Convert the server principal to an enterprise principal, for use with
* foreign realms. */
code = convert_to_enterprise(context, in_creds->server, &eprinc);
if (code != 0)
goto cleanup;
/* Make a shallow copy of in_creds with client pointing to the server
* principal. We will set s4u_creds.server for each request. */
s4u_creds = *in_creds;
s4u_creds.client = in_creds->server;
    /* Then, walk the referral path back to perform S4U2Self for the user. */
kdcopt = 0;
if (options & KRB5_GC_CANONICALIZE)
kdcopt |= KDC_OPT_CANONICALIZE;
if (options & KRB5_GC_FORWARDABLE)
kdcopt |= KDC_OPT_FORWARDABLE;
if (options & KRB5_GC_NO_TRANSIT_CHECK)
kdcopt |= KDC_OPT_DISABLE_TRANSITED_CHECK;
for (referral_count = 0;
referral_count < KRB5_REFERRAL_MAXHOPS;
referral_count++)
{
krb5_pa_data **in_padata = NULL;
krb5_pa_data **out_padata = NULL;
krb5_pa_data **enc_padata = NULL;
krb5_keyblock *subkey = NULL;
if (s4u_user.user_id.user != NULL && s4u_user.user_id.user->length) {
in_padata = calloc(2, sizeof(krb5_pa_data *));
if (in_padata == NULL) {
code = ENOMEM;
goto cleanup;
}
code = build_pa_for_user(context,
tgtptr,
&s4u_user.user_id, &in_padata[0]);
if (code != 0) {
krb5_free_pa_data(context, in_padata);
goto cleanup;
}
}
if (data_eq(tgtptr->server->data[1], in_creds->server->realm)) {
/* When asking the server realm, use the real principal. */
s4u_creds.server = in_creds->server;
} else {
/* When asking a foreign realm, use the enterprise principal, with
* the realm set to the TGS realm. */
sprinc = *eprinc;
sprinc.realm = tgtptr->server->data[1];
s4u_creds.server = &sprinc;
}
code = krb5_get_cred_via_tkt_ext(context, tgtptr,
KDC_OPT_CANONICALIZE |
FLAGS2OPTS(tgtptr->ticket_flags) |
kdcopt,
tgtptr->addresses,
in_padata, &s4u_creds,
build_pa_s4u_x509_user, &s4u_user,
&out_padata, &enc_padata,
out_creds, &subkey);
if (code != 0) {
krb5_free_checksum_contents(context, &s4u_user.cksum);
krb5_free_pa_data(context, in_padata);
goto cleanup;
}
code = verify_s4u2self_reply(context, subkey, &s4u_user,
out_padata, enc_padata);
krb5_free_checksum_contents(context, &s4u_user.cksum);
krb5_free_pa_data(context, in_padata);
krb5_free_pa_data(context, out_padata);
krb5_free_pa_data(context, enc_padata);
krb5_free_keyblock(context, subkey);
if (code != 0)
goto cleanup;
if (krb5_principal_compare(context,
in_creds->server,
(*out_creds)->server)) {
code = 0;
goto cleanup;
} else if (IS_TGS_PRINC((*out_creds)->server)) {
krb5_data *r1 = &tgtptr->server->data[1];
krb5_data *r2 = &(*out_creds)->server->data[1];
if (data_eq(*r1, *r2)) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
code = KRB5_ERR_HOST_REALM_UNKNOWN;
break;
}
for (i = 0; i < referral_count; i++) {
if (krb5_principal_compare(context,
(*out_creds)->server,
referral_tgts[i]->server)) {
code = KRB5_KDC_UNREACH;
goto cleanup;
}
}
tgtptr = *out_creds;
referral_tgts[referral_count] = *out_creds;
*out_creds = NULL;
} else {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
code = KRB5KRB_AP_WRONG_PRINC; /* XXX */
break;
}
}
cleanup:
for (i = 0; i < KRB5_REFERRAL_MAXHOPS; i++) {
if (referral_tgts[i] != NULL)
krb5_free_creds(context, referral_tgts[i]);
}
krb5_free_principal(context, tgs);
krb5_free_principal(context, eprinc);
krb5_free_creds(context, tgt);
krb5_free_principal(context, s4u_user.user_id.user);
krb5_free_checksum_contents(context, &s4u_user.cksum);
return code;
}
krb5_error_code KRB5_CALLCONV
krb5_get_credentials_for_user(krb5_context context, krb5_flags options,
krb5_ccache ccache, krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_principal realm = NULL;
*out_creds = NULL;
if (options & KRB5_GC_CONSTRAINED_DELEGATION) {
code = EINVAL;
goto cleanup;
}
if (in_creds->client != NULL) {
/* Uncanonicalised check */
code = krb5_get_credentials(context, options | KRB5_GC_CACHED,
ccache, in_creds, out_creds);
if (code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
goto cleanup;
if ((options & KRB5_GC_CACHED) && !(options & KRB5_GC_CANONICALIZE))
goto cleanup;
}
code = s4u_identify_user(context, in_creds, subject_cert, &realm);
if (code != 0)
goto cleanup;
if (in_creds->client != NULL &&
in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
/* Post-canonicalisation check for enterprise principals */
krb5_creds mcreds = *in_creds;
mcreds.client = realm;
code = krb5_get_credentials(context, options | KRB5_GC_CACHED,
ccache, &mcreds, out_creds);
if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
|| (options & KRB5_GC_CACHED))
goto cleanup;
}
code = krb5_get_self_cred_from_kdc(context, options, ccache, in_creds,
subject_cert, &realm->realm, out_creds);
if (code != 0)
goto cleanup;
assert(*out_creds != NULL);
if ((options & KRB5_GC_NO_STORE) == 0) {
code = krb5_cc_store_cred(context, ccache, *out_creds);
if (code != 0)
goto cleanup;
}
cleanup:
if (code != 0 && *out_creds != NULL) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
}
krb5_free_principal(context, realm);
return code;
}
/*
* Exported API for constrained delegation (S4U2Proxy).
*
* This is preferable to using krb5_get_credentials directly because
* it can perform some additional checks.
*/
krb5_error_code KRB5_CALLCONV
krb5_get_credentials_for_proxy(krb5_context context,
krb5_flags options,
krb5_ccache ccache,
krb5_creds *in_creds,
krb5_ticket *evidence_tkt,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_creds mcreds;
krb5_creds *ncreds = NULL;
krb5_flags fields;
krb5_data *evidence_tkt_data = NULL;
krb5_creds s4u_creds;
*out_creds = NULL;
if (in_creds == NULL || in_creds->client == NULL ||
evidence_tkt == NULL || evidence_tkt->enc_part2 == NULL) {
code = EINVAL;
goto cleanup;
}
/*
* Caller should have set in_creds->client to match evidence
* ticket client
*/
if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client,
in_creds->client)) {
code = EINVAL;
goto cleanup;
}
if ((evidence_tkt->enc_part2->flags & TKT_FLG_FORWARDABLE) == 0) {
code = KRB5_TKT_NOT_FORWARDABLE;
goto cleanup;
}
code = krb5int_construct_matching_creds(context, options, in_creds,
&mcreds, &fields);
if (code != 0)
goto cleanup;
ncreds = calloc(1, sizeof(*ncreds));
if (ncreds == NULL) {
code = ENOMEM;
goto cleanup;
}
ncreds->magic = KV5M_CRED;
code = krb5_cc_retrieve_cred(context, ccache, fields, &mcreds, ncreds);
if (code != 0) {
free(ncreds);
ncreds = in_creds;
} else {
*out_creds = ncreds;
}
if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
|| options & KRB5_GC_CACHED)
goto cleanup;
code = encode_krb5_ticket(evidence_tkt, &evidence_tkt_data);
if (code != 0)
goto cleanup;
s4u_creds = *in_creds;
s4u_creds.client = evidence_tkt->server;
s4u_creds.second_ticket = *evidence_tkt_data;
code = krb5_get_credentials(context,
options | KRB5_GC_CONSTRAINED_DELEGATION,
ccache,
&s4u_creds,
out_creds);
if (code != 0)
goto cleanup;
/*
* Check client name because we couldn't compare that inside
     * krb5_get_credentials() (enc_part2 is unavailable in the clear)
*/
if (!krb5_principal_compare(context,
evidence_tkt->enc_part2->client,
(*out_creds)->client)) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
cleanup:
if (*out_creds != NULL && code != 0) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
}
if (evidence_tkt_data != NULL)
krb5_free_data(context, evidence_tkt_data);
return code;
}
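/*
 * Minimal caller sketch (not compiled; the ccache and target principal
 * parameters are hypothetical stand-ins for the service's own state):
 */
#if 0
static krb5_error_code
s4u2proxy_example(krb5_context context, krb5_ccache service_ccache,
                  krb5_ticket *evidence_tkt, krb5_principal target)
{
    krb5_creds in_creds, *proxy_creds = NULL;
    krb5_error_code code;

    memset(&in_creds, 0, sizeof(in_creds));
    /* The client must match the evidence ticket's client (checked above). */
    in_creds.client = evidence_tkt->enc_part2->client;
    in_creds.server = target;   /* final target service */
    code = krb5_get_credentials_for_proxy(context, 0, service_ccache,
                                          &in_creds, evidence_tkt,
                                          &proxy_creds);
    if (code == 0)
        krb5_free_creds(context, proxy_creds);
    return code;
}
#endif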
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/krb/s4u_creds.c */
/*
* Copyright (C) 2009 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
#include "int-proto.h"
/* Convert ticket flags to necessary KDC options */
#define FLAGS2OPTS(flags) (flags & KDC_TKT_COMMON_MASK)
/*
* Implements S4U2Self, by which a service can request a ticket to
* itself on behalf of an arbitrary principal.
*/
static krb5_error_code
krb5_get_as_key_noop(
krb5_context context,
krb5_principal client,
krb5_enctype etype,
krb5_prompter_fct prompter,
void *prompter_data,
krb5_data *salt,
krb5_data *params,
krb5_keyblock *as_key,
void *gak_data,
k5_response_items *ritems)
{
    /* Force a hard error; we don't actually have the key. */
return KRB5_PREAUTH_FAILED;
}
static krb5_error_code
s4u_identify_user(krb5_context context,
krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_principal *canon_user)
{
krb5_error_code code;
krb5_preauthtype ptypes[1] = { KRB5_PADATA_S4U_X509_USER };
krb5_creds creds;
int use_master = 0;
krb5_get_init_creds_opt *opts = NULL;
krb5_principal_data client;
krb5_s4u_userid userid;
*canon_user = NULL;
if (in_creds->client == NULL && subject_cert == NULL) {
return EINVAL;
}
if (in_creds->client != NULL &&
in_creds->client->type != KRB5_NT_ENTERPRISE_PRINCIPAL) {
int anonymous;
anonymous = krb5_principal_compare(context, in_creds->client,
krb5_anonymous_principal());
return krb5_copy_principal(context,
anonymous ? in_creds->server
: in_creds->client,
canon_user);
}
memset(&creds, 0, sizeof(creds));
memset(&userid, 0, sizeof(userid));
if (subject_cert != NULL)
userid.subject_cert = *subject_cert;
code = krb5_get_init_creds_opt_alloc(context, &opts);
if (code != 0)
goto cleanup;
krb5_get_init_creds_opt_set_tkt_life(opts, 15);
krb5_get_init_creds_opt_set_renew_life(opts, 0);
krb5_get_init_creds_opt_set_forwardable(opts, 0);
krb5_get_init_creds_opt_set_proxiable(opts, 0);
krb5_get_init_creds_opt_set_canonicalize(opts, 1);
krb5_get_init_creds_opt_set_preauth_list(opts, ptypes, 1);
if (in_creds->client != NULL) {
client = *in_creds->client;
client.realm = in_creds->server->realm;
} else {
client.magic = KV5M_PRINCIPAL;
client.realm = in_creds->server->realm;
/* should this be NULL, empty or a fixed string? XXX */
client.data = NULL;
client.length = 0;
client.type = KRB5_NT_ENTERPRISE_PRINCIPAL;
}
code = k5_get_init_creds(context, &creds, &client, NULL, NULL, 0, NULL,
opts, krb5_get_as_key_noop, &userid, &use_master,
NULL);
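    /* Identification needs only the canonical client name, so an expired or
     * must-change password (KRB5KDC_ERR_KEY_EXP) is as acceptable here as
     * outright preauth failure. */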
if (!code || code == KRB5_PREAUTH_FAILED || code == KRB5KDC_ERR_KEY_EXP) {
*canon_user = userid.user;
userid.user = NULL;
code = 0;
}
cleanup:
krb5_free_cred_contents(context, &creds);
if (opts != NULL)
krb5_get_init_creds_opt_free(context, opts);
if (userid.user != NULL)
krb5_free_principal(context, userid.user);
return code;
}
static krb5_error_code
make_pa_for_user_checksum(krb5_context context,
krb5_keyblock *key,
krb5_pa_for_user *req,
krb5_checksum *cksum)
{
krb5_error_code code;
int i;
char *p;
krb5_data data;
data.length = 4;
for (i = 0; i < req->user->length; i++)
data.length += req->user->data[i].length;
data.length += req->user->realm.length;
data.length += req->auth_package.length;
p = data.data = malloc(data.length);
if (data.data == NULL)
return ENOMEM;
p[0] = (req->user->type >> 0) & 0xFF;
p[1] = (req->user->type >> 8) & 0xFF;
p[2] = (req->user->type >> 16) & 0xFF;
p[3] = (req->user->type >> 24) & 0xFF;
p += 4;
for (i = 0; i < req->user->length; i++) {
if (req->user->data[i].length > 0)
memcpy(p, req->user->data[i].data, req->user->data[i].length);
p += req->user->data[i].length;
}
if (req->user->realm.length > 0)
memcpy(p, req->user->realm.data, req->user->realm.length);
p += req->user->realm.length;
if (req->auth_package.length > 0)
memcpy(p, req->auth_package.data, req->auth_package.length);
/* Per spec, use hmac-md5 checksum regardless of key type. */
code = krb5_c_make_checksum(context, CKSUMTYPE_HMAC_MD5_ARCFOUR, key,
KRB5_KEYUSAGE_APP_DATA_CKSUM, &data,
cksum);
free(data.data);
return code;
}
static krb5_error_code
build_pa_for_user(krb5_context context,
krb5_creds *tgt,
krb5_s4u_userid *userid,
krb5_pa_data **out_padata)
{
krb5_error_code code;
krb5_pa_data *padata;
krb5_pa_for_user for_user;
krb5_data *for_user_data = NULL;
char package[] = "Kerberos";
if (userid->user == NULL)
return EINVAL;
memset(&for_user, 0, sizeof(for_user));
for_user.user = userid->user;
for_user.auth_package.data = package;
for_user.auth_package.length = sizeof(package) - 1;
code = make_pa_for_user_checksum(context, &tgt->keyblock,
&for_user, &for_user.cksum);
if (code != 0)
goto cleanup;
code = encode_krb5_pa_for_user(&for_user, &for_user_data);
if (code != 0)
goto cleanup;
padata = malloc(sizeof(*padata));
if (padata == NULL) {
code = ENOMEM;
goto cleanup;
}
padata->magic = KV5M_PA_DATA;
padata->pa_type = KRB5_PADATA_FOR_USER;
padata->length = for_user_data->length;
padata->contents = (krb5_octet *)for_user_data->data;
free(for_user_data);
for_user_data = NULL;
*out_padata = padata;
cleanup:
if (for_user.cksum.contents != NULL)
krb5_free_checksum_contents(context, &for_user.cksum);
krb5_free_data(context, for_user_data);
return code;
}
/*
* This function is invoked by krb5int_make_tgs_request_ext() just before the
* request is encoded; it gives us access to the nonce and subkey without
* requiring them to be generated by the caller.
*/
static krb5_error_code
build_pa_s4u_x509_user(krb5_context context,
krb5_keyblock *subkey,
krb5_kdc_req *tgsreq,
void *gcvt_data)
{
krb5_error_code code;
krb5_pa_s4u_x509_user *s4u_user = (krb5_pa_s4u_x509_user *)gcvt_data;
krb5_data *data = NULL;
krb5_pa_data **padata;
krb5_cksumtype cksumtype;
int i;
assert(s4u_user->cksum.contents == NULL);
s4u_user->user_id.nonce = tgsreq->nonce;
code = encode_krb5_s4u_userid(&s4u_user->user_id, &data);
if (code != 0)
goto cleanup;
/* [MS-SFU] 2.2.2: unusual to say the least, but enc_padata secures it */
if (subkey->enctype == ENCTYPE_ARCFOUR_HMAC ||
subkey->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) {
cksumtype = CKSUMTYPE_RSA_MD4;
} else {
code = krb5int_c_mandatory_cksumtype(context, subkey->enctype,
&cksumtype);
}
if (code != 0)
goto cleanup;
code = krb5_c_make_checksum(context, cksumtype, subkey,
KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data,
&s4u_user->cksum);
if (code != 0)
goto cleanup;
krb5_free_data(context, data);
data = NULL;
code = encode_krb5_pa_s4u_x509_user(s4u_user, &data);
if (code != 0)
goto cleanup;
assert(tgsreq->padata != NULL);
for (i = 0; tgsreq->padata[i] != NULL; i++)
;
padata = realloc(tgsreq->padata,
(i + 2) * sizeof(krb5_pa_data *));
if (padata == NULL) {
code = ENOMEM;
goto cleanup;
}
tgsreq->padata = padata;
padata[i] = malloc(sizeof(krb5_pa_data));
if (padata[i] == NULL) {
code = ENOMEM;
goto cleanup;
}
padata[i]->magic = KV5M_PA_DATA;
padata[i]->pa_type = KRB5_PADATA_S4U_X509_USER;
padata[i]->length = data->length;
padata[i]->contents = (krb5_octet *)data->data;
padata[i + 1] = NULL;
free(data);
data = NULL;
cleanup:
if (code != 0 && s4u_user->cksum.contents != NULL) {
krb5_free_checksum_contents(context, &s4u_user->cksum);
s4u_user->cksum.contents = NULL;
}
krb5_free_data(context, data);
return code;
}
static krb5_error_code
verify_s4u2self_reply(krb5_context context,
krb5_keyblock *subkey,
krb5_pa_s4u_x509_user *req_s4u_user,
krb5_pa_data **rep_padata,
krb5_pa_data **enc_padata)
{
krb5_error_code code;
krb5_pa_data *rep_s4u_padata, *enc_s4u_padata;
krb5_pa_s4u_x509_user *rep_s4u_user = NULL;
krb5_data data, *datap = NULL;
krb5_keyusage usage;
krb5_boolean valid;
krb5_boolean not_newer;
assert(req_s4u_user != NULL);
switch (subkey->enctype) {
case ENCTYPE_DES_CBC_CRC:
case ENCTYPE_DES_CBC_MD4:
case ENCTYPE_DES_CBC_MD5:
case ENCTYPE_DES3_CBC_SHA1:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
    case ENCTYPE_ARCFOUR_HMAC_EXP:
not_newer = TRUE;
break;
default:
not_newer = FALSE;
break;
}
enc_s4u_padata = krb5int_find_pa_data(context,
enc_padata,
KRB5_PADATA_S4U_X509_USER);
    /* XXX this will break newer enctypes with an MIT 1.7 KDC */
rep_s4u_padata = krb5int_find_pa_data(context,
rep_padata,
KRB5_PADATA_S4U_X509_USER);
if (rep_s4u_padata == NULL) {
if (not_newer == FALSE || enc_s4u_padata != NULL)
return KRB5_KDCREP_MODIFIED;
else
return 0;
}
data.length = rep_s4u_padata->length;
data.data = (char *)rep_s4u_padata->contents;
code = decode_krb5_pa_s4u_x509_user(&data, &rep_s4u_user);
if (code != 0)
goto cleanup;
if (rep_s4u_user->user_id.nonce != req_s4u_user->user_id.nonce) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
code = encode_krb5_s4u_userid(&rep_s4u_user->user_id, &datap);
if (code != 0)
goto cleanup;
if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE)
usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY;
else
usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST;
code = krb5_c_verify_checksum(context, subkey, usage, datap,
&rep_s4u_user->cksum, &valid);
if (code != 0)
goto cleanup;
if (valid == FALSE) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
/*
* KDCs that support KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE also return
* S4U enc_padata for older (pre-AES) encryption types only.
*/
if (not_newer) {
if (enc_s4u_padata == NULL) {
if (rep_s4u_user->user_id.options &
KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
} else {
if (enc_s4u_padata->length !=
req_s4u_user->cksum.length + rep_s4u_user->cksum.length) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
if (memcmp(enc_s4u_padata->contents,
req_s4u_user->cksum.contents,
req_s4u_user->cksum.length) ||
memcmp(&enc_s4u_padata->contents[req_s4u_user->cksum.length],
rep_s4u_user->cksum.contents,
rep_s4u_user->cksum.length)) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
}
} else if (!krb5_c_is_keyed_cksum(rep_s4u_user->cksum.checksum_type)) {
code = KRB5KRB_AP_ERR_INAPP_CKSUM;
goto cleanup;
}
cleanup:
krb5_free_pa_s4u_x509_user(context, rep_s4u_user);
krb5_free_data(context, datap);
return code;
}
/* Unparse princ and re-parse it as an enterprise principal. */
static krb5_error_code
convert_to_enterprise(krb5_context context, krb5_principal princ,
krb5_principal *eprinc_out)
{
krb5_error_code code;
char *str;
*eprinc_out = NULL;
code = krb5_unparse_name(context, princ, &str);
if (code != 0)
return code;
code = krb5_parse_name_flags(context, str,
KRB5_PRINCIPAL_PARSE_ENTERPRISE |
KRB5_PRINCIPAL_PARSE_IGNORE_REALM,
eprinc_out);
krb5_free_unparsed_name(context, str);
return code;
}
static krb5_error_code
krb5_get_self_cred_from_kdc(krb5_context context,
krb5_flags options,
krb5_ccache ccache,
krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_data *user_realm,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_principal tgs = NULL, eprinc = NULL;
krb5_principal_data sprinc;
krb5_creds tgtq, s4u_creds, *tgt = NULL, *tgtptr;
krb5_creds *referral_tgts[KRB5_REFERRAL_MAXHOPS];
krb5_pa_s4u_x509_user s4u_user;
int referral_count = 0, i;
krb5_flags kdcopt;
memset(&tgtq, 0, sizeof(tgtq));
memset(referral_tgts, 0, sizeof(referral_tgts));
*out_creds = NULL;
memset(&s4u_user, 0, sizeof(s4u_user));
if (in_creds->client != NULL && in_creds->client->length > 0) {
if (in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
code = krb5_build_principal_ext(context,
&s4u_user.user_id.user,
user_realm->length,
user_realm->data,
in_creds->client->data[0].length,
in_creds->client->data[0].data,
0);
if (code != 0)
goto cleanup;
s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL;
} else {
code = krb5_copy_principal(context,
in_creds->client,
&s4u_user.user_id.user);
if (code != 0)
goto cleanup;
}
} else {
code = krb5_build_principal_ext(context, &s4u_user.user_id.user,
user_realm->length,
user_realm->data);
if (code != 0)
goto cleanup;
s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL;
}
if (subject_cert != NULL)
s4u_user.user_id.subject_cert = *subject_cert;
s4u_user.user_id.options = KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE;
/* First, acquire a TGT to the user's realm. */
code = krb5int_tgtname(context, user_realm, &in_creds->server->realm,
&tgs);
if (code != 0)
goto cleanup;
tgtq.client = in_creds->server;
tgtq.server = tgs;
code = krb5_get_credentials(context, options, ccache, &tgtq, &tgt);
if (code != 0)
goto cleanup;
tgtptr = tgt;
/* Convert the server principal to an enterprise principal, for use with
* foreign realms. */
code = convert_to_enterprise(context, in_creds->server, &eprinc);
if (code != 0)
goto cleanup;
/* Make a shallow copy of in_creds with client pointing to the server
* principal. We will set s4u_creds.server for each request. */
s4u_creds = *in_creds;
s4u_creds.client = in_creds->server;
    /* Then, walk the referral path back to perform S4U2Self for the user. */
kdcopt = 0;
if (options & KRB5_GC_CANONICALIZE)
kdcopt |= KDC_OPT_CANONICALIZE;
if (options & KRB5_GC_FORWARDABLE)
kdcopt |= KDC_OPT_FORWARDABLE;
if (options & KRB5_GC_NO_TRANSIT_CHECK)
kdcopt |= KDC_OPT_DISABLE_TRANSITED_CHECK;
for (referral_count = 0;
referral_count < KRB5_REFERRAL_MAXHOPS;
referral_count++)
{
krb5_pa_data **in_padata = NULL;
krb5_pa_data **out_padata = NULL;
krb5_pa_data **enc_padata = NULL;
krb5_keyblock *subkey = NULL;
if (s4u_user.user_id.user != NULL && s4u_user.user_id.user->length) {
in_padata = calloc(2, sizeof(krb5_pa_data *));
if (in_padata == NULL) {
code = ENOMEM;
goto cleanup;
}
code = build_pa_for_user(context,
tgtptr,
&s4u_user.user_id, &in_padata[0]);
if (code != 0) {
krb5_free_pa_data(context, in_padata);
goto cleanup;
}
}
if (data_eq(tgtptr->server->data[1], in_creds->server->realm)) {
/* When asking the server realm, use the real principal. */
s4u_creds.server = in_creds->server;
} else {
/* When asking a foreign realm, use the enterprise principal, with
* the realm set to the TGS realm. */
sprinc = *eprinc;
sprinc.realm = tgtptr->server->data[1];
s4u_creds.server = &sprinc;
}
code = krb5_get_cred_via_tkt_ext(context, tgtptr,
KDC_OPT_CANONICALIZE |
FLAGS2OPTS(tgtptr->ticket_flags) |
kdcopt,
tgtptr->addresses,
in_padata, &s4u_creds,
build_pa_s4u_x509_user, &s4u_user,
&out_padata, &enc_padata,
out_creds, &subkey);
if (code != 0) {
krb5_free_checksum_contents(context, &s4u_user.cksum);
krb5_free_pa_data(context, in_padata);
goto cleanup;
}
code = verify_s4u2self_reply(context, subkey, &s4u_user,
out_padata, enc_padata);
krb5_free_checksum_contents(context, &s4u_user.cksum);
krb5_free_pa_data(context, in_padata);
krb5_free_pa_data(context, out_padata);
krb5_free_pa_data(context, enc_padata);
krb5_free_keyblock(context, subkey);
if (code != 0)
goto cleanup;
if (krb5_principal_compare(context,
in_creds->server,
(*out_creds)->server)) {
code = 0;
goto cleanup;
} else if (IS_TGS_PRINC((*out_creds)->server)) {
krb5_data *r1 = &tgtptr->server->data[1];
krb5_data *r2 = &(*out_creds)->server->data[1];
if (data_eq(*r1, *r2)) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
code = KRB5_ERR_HOST_REALM_UNKNOWN;
break;
}
for (i = 0; i < referral_count; i++) {
if (krb5_principal_compare(context,
(*out_creds)->server,
referral_tgts[i]->server)) {
code = KRB5_KDC_UNREACH;
goto cleanup;
}
}
tgtptr = *out_creds;
referral_tgts[referral_count] = *out_creds;
*out_creds = NULL;
} else {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
code = KRB5KRB_AP_WRONG_PRINC; /* XXX */
break;
}
}
cleanup:
for (i = 0; i < KRB5_REFERRAL_MAXHOPS; i++) {
if (referral_tgts[i] != NULL)
krb5_free_creds(context, referral_tgts[i]);
}
krb5_free_principal(context, tgs);
krb5_free_principal(context, eprinc);
krb5_free_creds(context, tgt);
krb5_free_principal(context, s4u_user.user_id.user);
krb5_free_checksum_contents(context, &s4u_user.cksum);
return code;
}
krb5_error_code KRB5_CALLCONV
krb5_get_credentials_for_user(krb5_context context, krb5_flags options,
krb5_ccache ccache, krb5_creds *in_creds,
krb5_data *subject_cert,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_principal realm = NULL;
*out_creds = NULL;
if (options & KRB5_GC_CONSTRAINED_DELEGATION) {
code = EINVAL;
goto cleanup;
}
if (in_creds->client != NULL) {
/* Uncanonicalised check */
code = krb5_get_credentials(context, options | KRB5_GC_CACHED,
ccache, in_creds, out_creds);
if (code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
goto cleanup;
if ((options & KRB5_GC_CACHED) && !(options & KRB5_GC_CANONICALIZE))
goto cleanup;
}
code = s4u_identify_user(context, in_creds, subject_cert, &realm);
if (code != 0)
goto cleanup;
if (in_creds->client != NULL &&
in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) {
/* Post-canonicalisation check for enterprise principals */
krb5_creds mcreds = *in_creds;
mcreds.client = realm;
code = krb5_get_credentials(context, options | KRB5_GC_CACHED,
ccache, &mcreds, out_creds);
if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
|| (options & KRB5_GC_CACHED))
goto cleanup;
}
code = krb5_get_self_cred_from_kdc(context, options, ccache, in_creds,
subject_cert, &realm->realm, out_creds);
if (code != 0)
goto cleanup;
assert(*out_creds != NULL);
if ((options & KRB5_GC_NO_STORE) == 0) {
code = krb5_cc_store_cred(context, ccache, *out_creds);
if (code != 0)
goto cleanup;
}
cleanup:
if (code != 0 && *out_creds != NULL) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
}
krb5_free_principal(context, realm);
return code;
}
/*
* Exported API for constrained delegation (S4U2Proxy).
*
* This is preferable to using krb5_get_credentials directly because
* it can perform some additional checks.
*/
krb5_error_code KRB5_CALLCONV
krb5_get_credentials_for_proxy(krb5_context context,
krb5_flags options,
krb5_ccache ccache,
krb5_creds *in_creds,
krb5_ticket *evidence_tkt,
krb5_creds **out_creds)
{
krb5_error_code code;
krb5_creds mcreds;
krb5_creds *ncreds = NULL;
krb5_flags fields;
krb5_data *evidence_tkt_data = NULL;
krb5_creds s4u_creds;
*out_creds = NULL;
if (in_creds == NULL || in_creds->client == NULL ||
evidence_tkt == NULL || evidence_tkt->enc_part2 == NULL) {
code = EINVAL;
goto cleanup;
}
/*
* Caller should have set in_creds->client to match evidence
* ticket client
*/
if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client,
in_creds->client)) {
code = EINVAL;
goto cleanup;
}
if ((evidence_tkt->enc_part2->flags & TKT_FLG_FORWARDABLE) == 0) {
code = KRB5_TKT_NOT_FORWARDABLE;
goto cleanup;
}
code = krb5int_construct_matching_creds(context, options, in_creds,
&mcreds, &fields);
if (code != 0)
goto cleanup;
ncreds = calloc(1, sizeof(*ncreds));
if (ncreds == NULL) {
code = ENOMEM;
goto cleanup;
}
ncreds->magic = KV5M_CRED;
code = krb5_cc_retrieve_cred(context, ccache, fields, &mcreds, ncreds);
if (code != 0) {
free(ncreds);
ncreds = in_creds;
} else {
*out_creds = ncreds;
}
if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE)
|| options & KRB5_GC_CACHED)
goto cleanup;
code = encode_krb5_ticket(evidence_tkt, &evidence_tkt_data);
if (code != 0)
goto cleanup;
s4u_creds = *in_creds;
s4u_creds.client = evidence_tkt->server;
s4u_creds.second_ticket = *evidence_tkt_data;
code = krb5_get_credentials(context,
options | KRB5_GC_CONSTRAINED_DELEGATION,
ccache,
&s4u_creds,
out_creds);
if (code != 0)
goto cleanup;
/*
* Check client name because we couldn't compare that inside
     * krb5_get_credentials() (enc_part2 is unavailable in the clear)
*/
if (!krb5_principal_compare(context,
evidence_tkt->enc_part2->client,
(*out_creds)->client)) {
code = KRB5_KDCREP_MODIFIED;
goto cleanup;
}
cleanup:
if (*out_creds != NULL && code != 0) {
krb5_free_creds(context, *out_creds);
*out_creds = NULL;
}
if (evidence_tkt_data != NULL)
krb5_free_data(context, evidence_tkt_data);
return code;
}
|
95,232,616,501,942 | from k5test import *
realm = K5Realm(create_host=False, get_creds=False)
usercache = 'FILE:' + os.path.join(realm.testdir, 'usercache')
storagecache = 'FILE:' + os.path.join(realm.testdir, 'save')
# Create two service principals with keys in the default keytab.
service1 = 'service/1@%s' % realm.realm
realm.addprinc(service1)
realm.extract_keytab(service1, realm.keytab)
service2 = 'service/2@%s' % realm.realm
realm.addprinc(service2)
realm.extract_keytab(service2, realm.keytab)
puser = 'p:' + realm.user_princ
pservice1 = 'p:' + service1
pservice2 = 'p:' + service2
# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])
# Try krb5 -> S4U2Proxy with forwardable user creds. This should fail
# at the S4U2Proxy step since the DB2 back end currently has no
# support for allowing it.
realm.kinit(realm.user_princ, password('user'), ['-f', '-c', usercache])
output = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2], expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
'NOT_ALLOWED_TO_DELEGATE' not in output):
fail('krb5 -> s4u2proxy')
# Again with SPNEGO.
output = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache,
'-', pservice1, pservice2],
expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
'NOT_ALLOWED_TO_DELEGATE' not in output):
fail('krb5 -> s4u2proxy (SPNEGO)')
# Try krb5 -> S4U2Proxy without forwardable user creds. This should
# result in no delegated credential being created by
# accept_sec_context.
realm.kinit(realm.user_princ, password('user'), ['-c', usercache])
realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, pservice1,
pservice1, pservice2], expected_msg='no credential delegated')
# Try S4U2Self. Ask for an S4U2Proxy step; this won't happen because
# service/1 isn't allowed to get a forwardable S4U2Self ticket.
output = realm.run(['./t_s4u', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
'Source name:\t' + realm.user_princ not in output):
fail('s4u2self')
output = realm.run(['./t_s4u', '--spnego', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
'Source name:\t' + realm.user_princ not in output):
fail('s4u2self (SPNEGO)')
# Correct that problem and try again. As above, the S4U2Proxy step
# won't actually succeed since we don't support that in DB2.
realm.run([kadminl, 'modprinc', '+ok_to_auth_as_delegate', service1])
realm.run(['./t_s4u', puser, pservice2], expected_code=1,
expected_msg='NOT_ALLOWED_TO_DELEGATE')
# Again with SPNEGO. This uses SPNEGO for the initial authentication,
# but still uses krb5 for S4U2Proxy--the delegated cred is returned as
# a krb5 cred, not a SPNEGO cred, and t_s4u uses the delegated cred
# directly rather than saving and reacquiring it.
realm.run(['./t_s4u', '--spnego', puser, pservice2], expected_code=1,
expected_msg='NOT_ALLOWED_TO_DELEGATE')
realm.stop()
# Set up a realm using the test KDB module so that we can do
# successful S4U2Proxy delegations.
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts'},
'service/1': {'flags': '+ok-to-auth-as-delegate',
'keys': 'aes128-cts'},
'service/2': {'keys': 'aes128-cts'}}
conf = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'delegation': {'service/1': 'service/2'}}}}
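# (The test KDB module reads this 'delegation' map as a constrained
# delegation ACL: service/1 may S4U2Proxy to service/2.)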
realm = K5Realm(create_kdb=False, kdc_conf=conf)
userkeytab = 'FILE:' + os.path.join(realm.testdir, 'userkeytab')
realm.extract_keytab(realm.user_princ, userkeytab)
realm.extract_keytab(service1, realm.keytab)
realm.extract_keytab(service2, realm.keytab)
realm.start_kdc()
# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])
# Successful krb5 -> S4U2Proxy, with krb5 and SPNEGO mechs.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
out = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache,
'-', pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Successful S4U2Self -> S4U2Proxy.
out = realm.run(['./t_s4u', puser, pservice2])
# Regression test for #8139: get a user ticket directly for service1 and
# try krb5 -> S4U2Proxy.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab, '-S', service1])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Simulate a krbtgt rollover and verify that the user ticket can still
# be validated.
realm.stop_kdc()
newtgt_keys = ['2 aes128-cts', '1 aes128-cts']
newtgt_princs = {'krbtgt/KRBTEST.COM': {'keys': newtgt_keys}}
newtgt_conf = {'dbmodules': {'test': {'princs': newtgt_princs}}}
newtgt_env = realm.special_env('newtgt', True, kdc_conf=newtgt_conf)
realm.start_kdc(env=newtgt_env)
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Get a user ticket after the krbtgt rollover and verify that
# S4U2Proxy delegation works (also a #8139 regression test).
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
realm.stop()
# Test cross-realm S4U2Self using server referrals.
mark('cross-realm S4U2Self')
testprincs = {'krbtgt/SREALM': {'keys': 'aes128-cts'},
'krbtgt/UREALM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts', 'flags': '+preauth'}}
kdcconf1 = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'alias': {'enterprise@abc': '@UREALM'}}}}
kdcconf2 = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'alias': {'user@SREALM': '@SREALM',
'enterprise@abc': 'user'}}}}
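# The 'alias' entries simulate referrals in the test KDB module: SREALM
# refers enterprise@abc to UREALM, where the alias resolves to the real
# 'user' principal.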
r1, r2 = cross_realms(2, xtgts=(),
args=({'realm': 'SREALM', 'kdc_conf': kdcconf1},
{'realm': 'UREALM', 'kdc_conf': kdcconf2}),
create_kdb=False)
r1.start_kdc()
r2.start_kdc()
r1.extract_keytab(r1.user_princ, r1.keytab)
r1.kinit(r1.user_princ, None, ['-k', '-t', r1.keytab])
# Include a regression test for #8741 by unsetting the default realm.
remove_default = {'libdefaults': {'default_realm': None}}
no_default = r1.special_env('no_default', False, krb5_conf=remove_default)
msgs = ('Getting credentials user@UREALM -> user@SREALM',
'/Matching credential not found',
'Getting credentials user@SREALM -> krbtgt/UREALM@SREALM',
'Received creds for desired service krbtgt/UREALM@SREALM',
'via TGT krbtgt/UREALM@SREALM after requesting user\\@SREALM@UREALM',
'krbtgt/SREALM@UREALM differs from requested user\\@SREALM@UREALM',
'via TGT krbtgt/SREALM@UREALM after requesting user@SREALM',
'TGS reply is for user@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'p:' + r2.user_princ, '-', r1.keytab], env=no_default,
expected_trace=msgs)
# Test realm identification of enterprise principal names ([MS-S4U]
# 3.1.5.1.1.1). Attach a bogus realm to the enterprise name to verify
# that we start at the server realm.
mark('cross-realm S4U2Self with enterprise name')
msgs = ('Getting initial credentials for enterprise\\@abc@SREALM',
'Processing preauth types: PA-FOR-X509-USER (130)',
'Sending unauthenticated request',
'/Realm not local to KDC',
'Following referral to realm UREALM',
'Processing preauth types: PA-FOR-X509-USER (130)',
'Sending unauthenticated request',
'/Additional pre-authentication required',
'/Generic preauthentication failure',
'Getting credentials enterprise\\@abc@UREALM -> user@SREALM',
'TGS reply is for enterprise\\@abc@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'e:enterprise@abc@NOREALM', '-', r1.keytab],
expected_trace=msgs)
r1.stop()
r2.stop()
success('S4U test cases')
| from k5test import *
realm = K5Realm(create_host=False, get_creds=False)
usercache = 'FILE:' + os.path.join(realm.testdir, 'usercache')
storagecache = 'FILE:' + os.path.join(realm.testdir, 'save')
# Create two service principals with keys in the default keytab.
service1 = 'service/1@%s' % realm.realm
realm.addprinc(service1)
realm.extract_keytab(service1, realm.keytab)
service2 = 'service/2@%s' % realm.realm
realm.addprinc(service2)
realm.extract_keytab(service2, realm.keytab)
puser = 'p:' + realm.user_princ
pservice1 = 'p:' + service1
pservice2 = 'p:' + service2
# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])
# Try S4U2Self for user with a restricted password.
realm.run([kadminl, 'modprinc', '+needchange', realm.user_princ])
realm.run(['./t_s4u', 'e:user', '-'])
realm.run([kadminl, 'modprinc', '-needchange',
'-pwexpire', '1/1/2000', realm.user_princ])
realm.run(['./t_s4u', 'e:user', '-'])
realm.run([kadminl, 'modprinc', '-pwexpire', 'never', realm.user_princ])
# Try krb5 -> S4U2Proxy with forwardable user creds. This should fail
# at the S4U2Proxy step since the DB2 back end currently has no
# support for allowing it.
realm.kinit(realm.user_princ, password('user'), ['-f', '-c', usercache])
output = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2], expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
'NOT_ALLOWED_TO_DELEGATE' not in output):
fail('krb5 -> s4u2proxy')
# Again with SPNEGO.
output = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache,
'-', pservice1, pservice2],
expected_code=1)
if ('auth1: ' + realm.user_princ not in output or
'NOT_ALLOWED_TO_DELEGATE' not in output):
fail('krb5 -> s4u2proxy (SPNEGO)')
# Try krb5 -> S4U2Proxy without forwardable user creds. This should
# result in no delegated credential being created by
# accept_sec_context.
realm.kinit(realm.user_princ, password('user'), ['-c', usercache])
realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, pservice1,
pservice1, pservice2], expected_msg='no credential delegated')
# Try S4U2Self. Ask for an S4U2Proxy step; this won't happen because
# service/1 isn't allowed to get a forwardable S4U2Self ticket.
output = realm.run(['./t_s4u', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
'Source name:\t' + realm.user_princ not in output):
fail('s4u2self')
output = realm.run(['./t_s4u', '--spnego', puser, pservice2])
if ('Warning: no delegated cred handle' not in output or
'Source name:\t' + realm.user_princ not in output):
fail('s4u2self (SPNEGO)')
# Correct that problem and try again. As above, the S4U2Proxy step
# won't actually succeed since we don't support that in DB2.
realm.run([kadminl, 'modprinc', '+ok_to_auth_as_delegate', service1])
realm.run(['./t_s4u', puser, pservice2], expected_code=1,
expected_msg='NOT_ALLOWED_TO_DELEGATE')
# Again with SPNEGO. This uses SPNEGO for the initial authentication,
# but still uses krb5 for S4U2Proxy--the delegated cred is returned as
# a krb5 cred, not a SPNEGO cred, and t_s4u uses the delegated cred
# directly rather than saving and reacquiring it.
realm.run(['./t_s4u', '--spnego', puser, pservice2], expected_code=1,
expected_msg='NOT_ALLOWED_TO_DELEGATE')
realm.stop()
# Set up a realm using the test KDB module so that we can do
# successful S4U2Proxy delegations.
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts'},
'service/1': {'flags': '+ok-to-auth-as-delegate',
'keys': 'aes128-cts'},
'service/2': {'keys': 'aes128-cts'}}
conf = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'delegation': {'service/1': 'service/2'}}}}
realm = K5Realm(create_kdb=False, kdc_conf=conf)
userkeytab = 'FILE:' + os.path.join(realm.testdir, 'userkeytab')
realm.extract_keytab(realm.user_princ, userkeytab)
realm.extract_keytab(service1, realm.keytab)
realm.extract_keytab(service2, realm.keytab)
realm.start_kdc()
# Get forwardable creds for service1 in the default cache.
realm.kinit(service1, None, ['-f', '-k'])
# Successful krb5 -> S4U2Proxy, with krb5 and SPNEGO mechs.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
out = realm.run(['./t_s4u2proxy_krb5', '--spnego', usercache, storagecache,
'-', pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Successful S4U2Self -> S4U2Proxy.
out = realm.run(['./t_s4u', puser, pservice2])
# Regression test for #8139: get a user ticket directly for service1 and
# try krb5 -> S4U2Proxy.
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab, '-S', service1])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Simulate a krbtgt rollover and verify that the user ticket can still
# be validated.
realm.stop_kdc()
newtgt_keys = ['2 aes128-cts', '1 aes128-cts']
newtgt_princs = {'krbtgt/KRBTEST.COM': {'keys': newtgt_keys}}
newtgt_conf = {'dbmodules': {'test': {'princs': newtgt_princs}}}
newtgt_env = realm.special_env('newtgt', True, kdc_conf=newtgt_conf)
realm.start_kdc(env=newtgt_env)
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
# Get a user ticket after the krbtgt rollover and verify that
# S4U2Proxy delegation works (also a #8139 regression test).
realm.kinit(realm.user_princ, None, ['-f', '-k', '-c', usercache,
'-t', userkeytab])
out = realm.run(['./t_s4u2proxy_krb5', usercache, storagecache, '-',
pservice1, pservice2])
if 'auth1: user@' not in out or 'auth2: user@' not in out:
fail('krb5 -> s4u2proxy')
realm.stop()
# Test cross realm S4U2Self using server referrals.
mark('cross-realm S4U2Self')
testprincs = {'krbtgt/SREALM': {'keys': 'aes128-cts'},
'krbtgt/UREALM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts', 'flags': '+preauth'}}
kdcconf1 = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'alias': {'enterprise@abc': '@UREALM'}}}}
kdcconf2 = {'realms': {'$realm': {'database_module': 'test'}},
'dbmodules': {'test': {'db_library': 'test',
'princs': testprincs,
'alias': {'user@SREALM': '@SREALM',
'enterprise@abc': 'user'}}}}
r1, r2 = cross_realms(2, xtgts=(),
args=({'realm': 'SREALM', 'kdc_conf': kdcconf1},
{'realm': 'UREALM', 'kdc_conf': kdcconf2}),
create_kdb=False)
r1.start_kdc()
r2.start_kdc()
r1.extract_keytab(r1.user_princ, r1.keytab)
r1.kinit(r1.user_princ, None, ['-k', '-t', r1.keytab])
# Include a regression test for #8741 by unsetting the default realm.
remove_default = {'libdefaults': {'default_realm': None}}
no_default = r1.special_env('no_default', False, krb5_conf=remove_default)
msgs = ('Getting credentials user@UREALM -> user@SREALM',
'/Matching credential not found',
'Getting credentials user@SREALM -> krbtgt/UREALM@SREALM',
'Received creds for desired service krbtgt/UREALM@SREALM',
'via TGT krbtgt/UREALM@SREALM after requesting user\\@SREALM@UREALM',
'krbtgt/SREALM@UREALM differs from requested user\\@SREALM@UREALM',
'via TGT krbtgt/SREALM@UREALM after requesting user@SREALM',
'TGS reply is for user@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'p:' + r2.user_princ, '-', r1.keytab], env=no_default,
expected_trace=msgs)
# Test realm identification of enterprise principal names ([MS-S4U]
# 3.1.5.1.1.1). Attach a bogus realm to the enterprise name to verify
# that we start at the server realm.
mark('cross-realm S4U2Self with enterprise name')
msgs = ('Getting initial credentials for enterprise\\@abc@SREALM',
'Processing preauth types: PA-FOR-X509-USER (130)',
'Sending unauthenticated request',
'/Realm not local to KDC',
'Following referral to realm UREALM',
'Processing preauth types: PA-FOR-X509-USER (130)',
'Sending unauthenticated request',
'/Additional pre-authentication required',
'/Generic preauthentication failure',
'Getting credentials enterprise\\@abc@UREALM -> user@SREALM',
'TGS reply is for enterprise\\@abc@UREALM -> user@SREALM')
r1.run(['./t_s4u', 'e:enterprise@abc@NOREALM', '-', r1.keytab],
expected_trace=msgs)
r1.stop()
r2.stop()
success('S4U test cases')
|
103,659,409,853,354 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/asn.1/asn1_encode.c */
/*
* Copyright 1994, 2008 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "asn1_encode.h"
struct asn1buf_st {
uint8_t *ptr; /* Position, moving backwards; may be NULL */
size_t count; /* Count of bytes written so far */
};
/**** Functions for encoding primitive types ****/
/* Insert one byte into buf going backwards. */
static inline void
insert_byte(asn1buf *buf, uint8_t o)
{
if (buf->ptr != NULL) {
buf->ptr--;
*buf->ptr = o;
}
buf->count++;
}
/* Insert a block of bytes into buf going backwards (but without reversing
* bytes). */
static inline void
insert_bytes(asn1buf *buf, const void *bytes, size_t len)
{
if (buf->ptr != NULL) {
memcpy(buf->ptr - len, bytes, len);
buf->ptr -= len;
}
buf->count += len;
}
void
k5_asn1_encode_bool(asn1buf *buf, intmax_t val)
{
insert_byte(buf, val ? 0xFF : 0x00);
}
void
k5_asn1_encode_int(asn1buf *buf, intmax_t val)
{
long valcopy;
int digit;
valcopy = val;
do {
digit = valcopy & 0xFF;
insert_byte(buf, digit);
valcopy = valcopy >> 8;
} while (valcopy != 0 && valcopy != ~0);
/* Make sure the high bit is of the proper signed-ness. */
if (val > 0 && (digit & 0x80) == 0x80)
insert_byte(buf, 0);
else if (val < 0 && (digit & 0x80) != 0x80)
insert_byte(buf, 0xFF);
}
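/* For illustration, the contents octets produced above for a few values:
* 127 -> 7F; 128 -> 00 80 (a leading zero keeps the value positive);
* -1 -> FF; -129 -> FF 7F (a sign-extension octet supplies the set high
* bit). */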
void
k5_asn1_encode_uint(asn1buf *buf, uintmax_t val)
{
uintmax_t valcopy;
int digit;
valcopy = val;
do {
digit = valcopy & 0xFF;
insert_byte(buf, digit);
valcopy = valcopy >> 8;
} while (valcopy != 0);
/* Make sure the high bit is of the proper signed-ness. */
if (digit & 0x80)
insert_byte(buf, 0);
}
krb5_error_code
k5_asn1_encode_bytestring(asn1buf *buf, uint8_t *const *val, size_t len)
{
if (len > 0 && val == NULL)
return ASN1_MISSING_FIELD;
insert_bytes(buf, *val, len);
return 0;
}
krb5_error_code
k5_asn1_encode_generaltime(asn1buf *buf, time_t val)
{
struct tm *gtime, gtimebuf;
char s[16], *sp;
time_t gmt_time = val;
int len;
/*
* Time encoding: YYYYMMDDhhmmssZ
*/
if (gmt_time == 0) {
sp = "19700101000000Z";
} else {
/*
* Sanity check this just to be paranoid, as gmtime can return NULL,
* and some bogus implementations might overrun on the sprintf.
*/
#ifdef HAVE_GMTIME_R
#ifdef GMTIME_R_RETURNS_INT
if (gmtime_r(&gmt_time, &gtimebuf) != 0)
return ASN1_BAD_GMTIME;
#else
if (gmtime_r(&gmt_time, &gtimebuf) == NULL)
return ASN1_BAD_GMTIME;
#endif
#else /* HAVE_GMTIME_R */
gtime = gmtime(&gmt_time);
if (gtime == NULL)
return ASN1_BAD_GMTIME;
memcpy(&gtimebuf, gtime, sizeof(gtimebuf));
#endif /* HAVE_GMTIME_R */
gtime = &gtimebuf;
if (gtime->tm_year > 8099 || gtime->tm_mon > 11 ||
gtime->tm_mday > 31 || gtime->tm_hour > 23 ||
gtime->tm_min > 59 || gtime->tm_sec > 59)
return ASN1_BAD_GMTIME;
len = snprintf(s, sizeof(s), "%04d%02d%02d%02d%02d%02dZ",
1900 + gtime->tm_year, gtime->tm_mon + 1,
gtime->tm_mday, gtime->tm_hour,
gtime->tm_min, gtime->tm_sec);
if (SNPRINTF_OVERFLOW(len, sizeof(s)))
/* Shouldn't be possible given above tests. */
return ASN1_BAD_GMTIME;
sp = s;
}
insert_bytes(buf, sp, 15);
return 0;
}
krb5_error_code
k5_asn1_encode_bitstring(asn1buf *buf, uint8_t *const *val, size_t len)
{
insert_bytes(buf, *val, len);
insert_byte(buf, 0);
return 0;
}
/**** Functions for decoding primitive types ****/
krb5_error_code
k5_asn1_decode_bool(const uint8_t *asn1, size_t len, intmax_t *val)
{
if (len != 1)
return ASN1_BAD_LENGTH;
*val = (*asn1 != 0);
return 0;
}
/* Decode asn1/len as the contents of a DER integer, placing the signed result
* in val. */
krb5_error_code
k5_asn1_decode_int(const uint8_t *asn1, size_t len, intmax_t *val)
{
intmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
n = (asn1[0] & 0x80) ? -1 : 0;
/* Check length; allow extra octet if first octet is 0. */
if (len > sizeof(intmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0; i < len; i++)
n = (n << 8) | asn1[i];
*val = n;
return 0;
}
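/* For illustration: the contents FF 7F decode to -129 (the set high bit of
* the first octet sign-extends n to -1 before accumulation), while 00 80
* decode to 128 (the extra leading zero octet is allowed by the length
* check above). */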
/* Decode asn1/len as the contents of a DER integer, placing the unsigned
* result in val. */
krb5_error_code
k5_asn1_decode_uint(const uint8_t *asn1, size_t len, uintmax_t *val)
{
uintmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
/* Check for negative values and check length. */
if ((asn1[0] & 0x80) || len > sizeof(uintmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0, n = 0; i < len; i++)
n = (n << 8) | asn1[i];
*val = n;
return 0;
}
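/* For illustration: the contents 00 FF decode to 255 (the leading zero
* octet keeps a high-bit value non-negative and is allowed by the length
* check), while a lone 80 octet is rejected with ASN1_OVERFLOW as a
* negative value. */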
krb5_error_code
k5_asn1_decode_bytestring(const uint8_t *asn1, size_t len,
uint8_t **str_out, size_t *len_out)
{
uint8_t *str;
*str_out = NULL;
*len_out = 0;
if (len == 0)
return 0;
str = malloc(len);
if (str == NULL)
return ENOMEM;
memcpy(str, asn1, len);
*str_out = str;
*len_out = len;
return 0;
}
krb5_error_code
k5_asn1_decode_generaltime(const uint8_t *asn1, size_t len, time_t *time_out)
{
const char *s = (char *)asn1;
struct tm ts;
time_t t;
*time_out = 0;
if (len != 15)
return ASN1_BAD_LENGTH;
/* Time encoding: YYYYMMDDhhmmssZ */
if (s[14] != 'Z')
return ASN1_BAD_FORMAT;
if (memcmp(s, "19700101000000Z", 15) == 0) {
*time_out = 0;
return 0;
}
#define c2i(c) ((c) - '0')
ts.tm_year = 1000 * c2i(s[0]) + 100 * c2i(s[1]) + 10 * c2i(s[2]) +
c2i(s[3]) - 1900;
ts.tm_mon = 10 * c2i(s[4]) + c2i(s[5]) - 1;
ts.tm_mday = 10 * c2i(s[6]) + c2i(s[7]);
ts.tm_hour = 10 * c2i(s[8]) + c2i(s[9]);
ts.tm_min = 10 * c2i(s[10]) + c2i(s[11]);
ts.tm_sec = 10 * c2i(s[12]) + c2i(s[13]);
ts.tm_isdst = -1;
t = krb5int_gmt_mktime(&ts);
if (t == -1)
return ASN1_BAD_TIMEFORMAT;
*time_out = t;
return 0;
}
/*
* Note: we return the number of bytes, not bits, in the bit string. If the
* number of bits is not a multiple of 8 we effectively round up to the next
* multiple of 8.
*/
krb5_error_code
k5_asn1_decode_bitstring(const uint8_t *asn1, size_t len,
uint8_t **bits_out, size_t *len_out)
{
uint8_t unused, *bits;
*bits_out = NULL;
*len_out = 0;
if (len == 0)
return ASN1_BAD_LENGTH;
unused = *asn1++;
len--;
if (unused > 7)
return ASN1_BAD_FORMAT;
bits = malloc(len);
if (bits == NULL)
return ENOMEM;
memcpy(bits, asn1, len);
if (len > 1)
bits[len - 1] &= (0xff << unused);
*bits_out = bits;
*len_out = len;
return 0;
}
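/* For illustration: bit string contents 03 6E 5D C0 carry 21 significant
* bits (the leading 03 is the unused-bit count for the last octet); the
* decoded result is the three bytes 6E 5D C0 with *len_out = 3, i.e. the
* bit count rounded up to whole bytes as noted above. */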
/**** Functions for encoding and decoding tags ****/
/* Encode a DER tag into buf with the tag parameters in t and the content
* length len. */
static krb5_error_code
make_tag(asn1buf *buf, const taginfo *t, size_t len)
{
asn1_tagnum tag_copy;
size_t len_copy, oldcount;
if (t->tagnum > ASN1_TAGNUM_MAX)
return ASN1_OVERFLOW;
/* Encode the length of the content within the tag. */
if (len < 128) {
insert_byte(buf, len & 0x7F);
} else {
oldcount = buf->count;
for (len_copy = len; len_copy != 0; len_copy >>= 8)
insert_byte(buf, len_copy & 0xFF);
insert_byte(buf, 0x80 | ((buf->count - oldcount) & 0x7F));
}
/* Encode the tag and construction bit. */
if (t->tagnum < 31) {
insert_byte(buf, t->asn1class | t->construction | t->tagnum);
} else {
tag_copy = t->tagnum;
insert_byte(buf, tag_copy & 0x7F);
tag_copy >>= 7;
for (; tag_copy != 0; tag_copy >>= 7)
insert_byte(buf, 0x80 | (tag_copy & 0x7F));
insert_byte(buf, t->asn1class | t->construction | 0x1F);
}
return 0;
}
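/* For illustration: a universal SEQUENCE (class 00, constructed, tag 16)
* around 5 content bytes yields the header 30 05, while a context-specific
* [1] constructed tag around 200 content bytes needs the long length form
* and yields A1 81 C8. */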
/*
* Read a BER tag and length from asn1/len. Place the tag parameters in
* tag_out. Set contents_out/clen_out to the octet range of the tag's
* contents, and remainder_out/rlen_out to the octet range after the end of the
* BER encoding.
*
* (krb5 ASN.1 encodings should be in DER, but for compatibility with some
* really ancient implementations we handle the indefinite length form in tags.
* However, we still insist on the primitive form of string types.)
*/
static krb5_error_code
get_tag(const uint8_t *asn1, size_t len, taginfo *tag_out,
const uint8_t **contents_out, size_t *clen_out,
const uint8_t **remainder_out, size_t *rlen_out)
{
krb5_error_code ret;
uint8_t o;
const uint8_t *c, *p, *tag_start = asn1;
size_t clen, llen, i;
taginfo t;
*contents_out = *remainder_out = NULL;
*clen_out = *rlen_out = 0;
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
tag_out->asn1class = o & 0xC0;
tag_out->construction = o & 0x20;
if ((o & 0x1F) != 0x1F) {
tag_out->tagnum = o & 0x1F;
} else {
tag_out->tagnum = 0;
do {
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
tag_out->tagnum = (tag_out->tagnum << 7) | (o & 0x7F);
} while (o & 0x80);
}
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
if (o == 0x80) {
/* Indefinite form (should not be present in DER, but we accept it). */
if (tag_out->construction != CONSTRUCTED)
return ASN1_MISMATCH_INDEF;
p = asn1;
while (!(len >= 2 && p[0] == 0 && p[1] == 0)) {
ret = get_tag(p, len, &t, &c, &clen, &p, &len);
if (ret)
return ret;
}
tag_out->tag_end_len = 2;
*contents_out = asn1;
*clen_out = p - asn1;
*remainder_out = p + 2;
*rlen_out = len - 2;
} else if ((o & 0x80) == 0) {
/* Short form (first octet gives content length). */
if (o > len)
return ASN1_OVERRUN;
tag_out->tag_end_len = 0;
*contents_out = asn1;
*clen_out = o;
*remainder_out = asn1 + *clen_out;
*rlen_out = len - (*remainder_out - asn1);
} else {
/* Long form (first octet gives number of base-256 length octets). */
llen = o & 0x7F;
if (llen > len)
return ASN1_OVERRUN;
if (llen > sizeof(*clen_out))
return ASN1_OVERFLOW;
for (i = 0, clen = 0; i < llen; i++)
clen = (clen << 8) | asn1[i];
if (clen > len - llen)
return ASN1_OVERRUN;
tag_out->tag_end_len = 0;
*contents_out = asn1 + llen;
*clen_out = clen;
*remainder_out = *contents_out + clen;
*rlen_out = len - (*remainder_out - asn1);
}
tag_out->tag_len = *contents_out - tag_start;
return 0;
}
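/* For illustration: given the DER bytes 30 03 02 01 05 (a SEQUENCE holding
* INTEGER 5), get_tag() reports a universal constructed tag 16 with
* tag_len = 2, points contents/clen at the three octets 02 01 05, and
* leaves an empty remainder.  For the indefinite form (length octet 80),
* contents run up to the matching 00 00 end-of-contents octets and
* tag_end_len is 2. */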
#ifdef POINTERS_ARE_ALL_THE_SAME
#define LOADPTR(PTR, TYPE) (*(const void *const *)(PTR))
#define STOREPTR(PTR, TYPE, VAL) (*(void **)(VAL) = (PTR))
#else
#define LOADPTR(PTR, PTRINFO) \
(assert((PTRINFO)->loadptr != NULL), (PTRINFO)->loadptr(PTR))
#define STOREPTR(PTR, PTRINFO, VAL) \
(assert((PTRINFO)->storeptr != NULL), (PTRINFO)->storeptr(PTR, VAL))
#endif
static size_t
get_nullterm_sequence_len(const void *valp, const struct atype_info *seq)
{
size_t i;
const struct atype_info *a;
const struct ptr_info *ptr;
const void *elt, *eltptr;
a = seq;
i = 0;
assert(a->type == atype_ptr);
assert(seq->size != 0);
ptr = a->tinfo;
while (1) {
eltptr = (const char *)valp + i * seq->size;
elt = LOADPTR(eltptr, ptr);
if (elt == NULL)
break;
i++;
}
return i;
}
static krb5_error_code
encode_sequence_of(asn1buf *buf, size_t seqlen, const void *val,
const struct atype_info *eltinfo);
static krb5_error_code
encode_nullterm_sequence_of(asn1buf *buf, const void *val,
const struct atype_info *type, int can_be_empty)
{
size_t len = get_nullterm_sequence_len(val, type);
if (!can_be_empty && len == 0)
return ASN1_MISSING_FIELD;
return encode_sequence_of(buf, len, val, type);
}
static intmax_t
load_int(const void *val, size_t size)
{
switch (size) {
case 1: return *(int8_t *)val;
case 2: return *(int16_t *)val;
case 4: return *(int32_t *)val;
case 8: return *(int64_t *)val;
default: abort();
}
}
static uintmax_t
load_uint(const void *val, size_t size)
{
switch (size) {
case 1: return *(uint8_t *)val;
case 2: return *(uint16_t *)val;
case 4: return *(uint32_t *)val;
case 8: return *(uint64_t *)val;
default: abort();
}
}
static krb5_error_code
load_count(const void *val, const struct counted_info *counted,
size_t *count_out)
{
const void *countptr = (const char *)val + counted->lenoff;
assert(sizeof(size_t) <= sizeof(uintmax_t));
if (counted->lensigned) {
intmax_t xlen = load_int(countptr, counted->lensize);
if (xlen < 0 || (uintmax_t)xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
} else {
uintmax_t xlen = load_uint(countptr, counted->lensize);
if ((size_t)xlen != xlen || xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
}
return 0;
}
static krb5_error_code
store_int(intmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
if ((int8_t)intval != intval)
return ASN1_OVERFLOW;
*(int8_t *)val = intval;
return 0;
case 2:
if ((int16_t)intval != intval)
return ASN1_OVERFLOW;
*(int16_t *)val = intval;
return 0;
case 4:
if ((int32_t)intval != intval)
return ASN1_OVERFLOW;
*(int32_t *)val = intval;
return 0;
case 8:
if ((int64_t)intval != intval)
return ASN1_OVERFLOW;
*(int64_t *)val = intval;
return 0;
default:
abort();
}
}
static krb5_error_code
store_uint(uintmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
if ((uint8_t)intval != intval)
return ASN1_OVERFLOW;
*(uint8_t *)val = intval;
return 0;
case 2:
if ((uint16_t)intval != intval)
return ASN1_OVERFLOW;
*(uint16_t *)val = intval;
return 0;
case 4:
if ((uint32_t)intval != intval)
return ASN1_OVERFLOW;
*(uint32_t *)val = intval;
return 0;
case 8:
if ((uint64_t)intval != intval)
return ASN1_OVERFLOW;
*(uint64_t *)val = intval;
return 0;
default:
abort();
}
}
/* Store a count value in an integer field of a structure. If count is
* SIZE_MAX and the target is a signed field, store -1. */
static krb5_error_code
store_count(size_t count, const struct counted_info *counted, void *val)
{
void *countptr = (char *)val + counted->lenoff;
if (counted->lensigned) {
if (count == SIZE_MAX)
return store_int(-1, counted->lensize, countptr);
else if ((intmax_t)count < 0)
return ASN1_OVERFLOW;
else
return store_int(count, counted->lensize, countptr);
} else
return store_uint(count, counted->lensize, countptr);
}
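/* For illustration: decode_cntype() below stores SIZE_MAX as the count for
* an unmatched extensible choice, so a signed distinguisher field ends up
* holding -1 via the special case above. */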
/* Split a DER encoding into tag and contents. Insert the contents into buf,
* then return the length of the contents and the tag. */
static krb5_error_code
split_der(asn1buf *buf, uint8_t *const *der, size_t len, taginfo *tag_out)
{
krb5_error_code ret;
const uint8_t *contents, *remainder;
size_t clen, rlen;
ret = get_tag(*der, len, tag_out, &contents, &clen, &remainder, &rlen);
if (ret)
return ret;
if (rlen != 0)
return ASN1_BAD_LENGTH;
insert_bytes(buf, contents, clen);
return 0;
}
/*
* Store the DER encoding given by t and asn1/len into the char * or
* uint8_t * pointed to by val. Set *count_out to the length of the
* DER encoding.
*/
static krb5_error_code
store_der(const taginfo *t, const uint8_t *asn1, size_t len, void *val,
size_t *count_out)
{
uint8_t *der;
size_t der_len;
*count_out = 0;
der_len = t->tag_len + len + t->tag_end_len;
der = malloc(der_len);
if (der == NULL)
return ENOMEM;
memcpy(der, asn1 - t->tag_len, der_len);
*(uint8_t **)val = der;
*count_out = der_len;
return 0;
}
static krb5_error_code
encode_sequence(asn1buf *buf, const void *val, const struct seq_info *seq);
static krb5_error_code
encode_cntype(asn1buf *buf, const void *val, size_t len,
const struct cntype_info *c, taginfo *tag_out);
/* Encode a value (contents only, no outer tag) according to a type, and return
* its encoded tag information. */
static krb5_error_code
encode_atype(asn1buf *buf, const void *val, const struct atype_info *a,
taginfo *tag_out)
{
krb5_error_code ret;
if (val == NULL)
return ASN1_MISSING_FIELD;
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->enc != NULL);
return fn->enc(buf, val, tag_out);
}
case atype_sequence:
assert(a->tinfo != NULL);
ret = encode_sequence(buf, val, a->tinfo);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
case atype_ptr: {
const struct ptr_info *ptr = a->tinfo;
assert(ptr->basetype != NULL);
return encode_atype(buf, LOADPTR(val, ptr), ptr->basetype, tag_out);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
return encode_atype(buf, (const char *)val + off->dataoff,
off->basetype, tag_out);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
assert(opt->is_present != NULL);
if (opt->is_present(val))
return encode_atype(buf, val, opt->basetype, tag_out);
else
return ASN1_OMITTED;
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
const void *dataptr = (const char *)val + counted->dataoff;
size_t count;
assert(counted->basetype != NULL);
ret = load_count(val, counted, &count);
if (ret)
return ret;
return encode_cntype(buf, dataptr, count, counted->basetype, tag_out);
}
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
assert(a->tinfo != NULL);
ret = encode_nullterm_sequence_of(buf, val, a->tinfo,
a->type ==
atype_nullterm_sequence_of);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
size_t oldcount = buf->count;
ret = encode_atype(buf, val, tag->basetype, tag_out);
if (ret)
return ret;
if (!tag->implicit) {
ret = make_tag(buf, tag_out, buf->count - oldcount);
if (ret)
return ret;
tag_out->construction = tag->construction;
}
tag_out->asn1class = tag->tagtype;
tag_out->tagnum = tag->tagval;
break;
}
case atype_bool:
k5_asn1_encode_bool(buf, load_int(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_BOOLEAN;
break;
case atype_int:
k5_asn1_encode_int(buf, load_int(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
case atype_uint:
k5_asn1_encode_uint(buf, load_uint(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
case atype_int_immediate: {
const struct immediate_info *imm = a->tinfo;
k5_asn1_encode_int(buf, imm->val);
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
}
default:
assert(a->type > atype_min);
assert(a->type < atype_max);
abort();
}
return 0;
}
static krb5_error_code
encode_atype_and_tag(asn1buf *buf, const void *val, const struct atype_info *a)
{
taginfo t;
krb5_error_code ret;
size_t oldcount = buf->count;
ret = encode_atype(buf, val, a, &t);
if (ret)
return ret;
ret = make_tag(buf, &t, buf->count - oldcount);
if (ret)
return ret;
return 0;
}
/*
* Encode an object and count according to a cntype_info structure. val is a
* pointer to the object being encoded, which in most cases is itself a
* pointer (but is a union in the cntype_choice case).
*/
static krb5_error_code
encode_cntype(asn1buf *buf, const void *val, size_t count,
const struct cntype_info *c, taginfo *tag_out)
{
krb5_error_code ret;
switch (c->type) {
case cntype_string: {
const struct string_info *string = c->tinfo;
assert(string->enc != NULL);
ret = string->enc(buf, val, count);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = string->tagval;
break;
}
case cntype_der:
return split_der(buf, val, count, tag_out);
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptr = a->tinfo;
assert(a->type == atype_ptr);
val = LOADPTR(val, ptr);
ret = encode_sequence_of(buf, count, val, ptr->basetype);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
if (count >= choice->n_options)
return ASN1_MISSING_FIELD;
return encode_atype(buf, val, choice->options[count], tag_out);
}
default:
assert(c->type > cntype_min);
assert(c->type < cntype_max);
abort();
}
return 0;
}
static krb5_error_code
encode_sequence(asn1buf *buf, const void *val, const struct seq_info *seq)
{
krb5_error_code ret;
size_t i;
for (i = seq->n_fields; i > 0; i--) {
ret = encode_atype_and_tag(buf, val, seq->fields[i - 1]);
if (ret == ASN1_OMITTED)
continue;
else if (ret != 0)
return ret;
}
return 0;
}
static krb5_error_code
encode_sequence_of(asn1buf *buf, size_t seqlen, const void *val,
const struct atype_info *eltinfo)
{
krb5_error_code ret;
size_t i;
const void *eltptr;
assert(eltinfo->size != 0);
for (i = seqlen; i > 0; i--) {
eltptr = (const char *)val + (i - 1) * eltinfo->size;
ret = encode_atype_and_tag(buf, eltptr, eltinfo);
if (ret)
return ret;
}
return 0;
}
/**** Functions for freeing C objects based on type info ****/
static void free_atype_ptr(const struct atype_info *a, void *val);
static void free_sequence(const struct seq_info *seq, void *val);
static void free_sequence_of(const struct atype_info *eltinfo, void *val,
size_t count);
static void free_cntype(const struct cntype_info *a, void *val, size_t count);
/*
* Free a C object according to a type description. Do not free pointers at
* the first level; they may be referenced by other fields of a sequence, and
* will be freed by free_atype_ptr in a second pass.
*/
static void
free_atype(const struct atype_info *a, void *val)
{
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
if (fn->free_func != NULL)
fn->free_func(val);
break;
}
case atype_sequence:
free_sequence(a->tinfo, val);
break;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
if (ptr != NULL) {
free_atype(ptrinfo->basetype, ptr);
free_atype_ptr(ptrinfo->basetype, ptr);
}
break;
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
free_atype(off->basetype, (char *)val + off->dataoff);
break;
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
free_atype(opt->basetype, val);
break;
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
void *dataptr = (char *)val + counted->dataoff;
size_t count;
if (load_count(val, counted, &count) == 0)
free_cntype(counted->basetype, dataptr, count);
break;
}
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of: {
size_t count = get_nullterm_sequence_len(val, a->tinfo);
free_sequence_of(a->tinfo, val, count);
break;
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
free_atype(tag->basetype, val);
break;
}
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
break;
default:
abort();
}
}
static void
free_atype_ptr(const struct atype_info *a, void *val)
{
switch (a->type) {
case atype_fn:
case atype_sequence:
case atype_counted:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
break;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
free(ptr);
STOREPTR(NULL, ptrinfo, val);
break;
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
free_atype_ptr(off->basetype, (char *)val + off->dataoff);
break;
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
free_atype_ptr(opt->basetype, val);
break;
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
free_atype_ptr(tag->basetype, val);
break;
}
default:
abort();
}
}
static void
free_cntype(const struct cntype_info *c, void *val, size_t count)
{
switch (c->type) {
case cntype_string:
case cntype_der:
free(*(char **)val);
*(char **)val = NULL;
break;
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptrinfo = a->tinfo;
void *seqptr = LOADPTR(val, ptrinfo);
free_sequence_of(ptrinfo->basetype, seqptr, count);
free(seqptr);
STOREPTR(NULL, ptrinfo, val);
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
if (count < choice->n_options) {
free_atype(choice->options[count], val);
free_atype_ptr(choice->options[count], val);
}
break;
}
default:
abort();
}
}
static void
free_sequence(const struct seq_info *seq, void *val)
{
size_t i;
for (i = 0; i < seq->n_fields; i++)
free_atype(seq->fields[i], val);
for (i = 0; i < seq->n_fields; i++)
free_atype_ptr(seq->fields[i], val);
}
static void
free_sequence_of(const struct atype_info *eltinfo, void *val, size_t count)
{
void *eltptr;
assert(eltinfo->size != 0);
while (count-- > 0) {
eltptr = (char *)val + count * eltinfo->size;
free_atype(eltinfo, eltptr);
free_atype_ptr(eltinfo, eltptr);
}
}
/**** Functions for decoding objects based on type info ****/
/* Return nonzero if t is an expected tag for an ASN.1 object of type a. */
static int
check_atype_tag(const struct atype_info *a, const taginfo *t)
{
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->check_tag != NULL);
return fn->check_tag(t);
}
case atype_sequence:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
return (t->asn1class == UNIVERSAL && t->construction == CONSTRUCTED &&
t->tagnum == ASN1_SEQUENCE);
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
return check_atype_tag(ptrinfo->basetype, t);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
return check_atype_tag(off->basetype, t);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
return check_atype_tag(opt->basetype, t);
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
switch (counted->basetype->type) {
case cntype_string: {
const struct string_info *string = counted->basetype->tinfo;
return (t->asn1class == UNIVERSAL &&
t->construction == PRIMITIVE &&
t->tagnum == string->tagval);
}
case cntype_seqof:
return (t->asn1class == UNIVERSAL &&
t->construction == CONSTRUCTED &&
t->tagnum == ASN1_SEQUENCE);
case cntype_der:
/*
* We treat any tag as matching a stored DER encoding. In some
* cases we know what the tag should be; in others, we truly want
* to accept any tag. If it ever becomes an issue, we could add
* optional tag info to the type and check it here.
*/
return 1;
case cntype_choice:
/*
* ASN.1 choices may or may not be extensible. For now, we treat
* all choices as extensible and match any tag. We should consider
* modeling whether choices are extensible before making the
* encoder visible to plugins.
*/
return 1;
default:
abort();
}
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
/* NOTE: Doesn't check construction bit for implicit tags. */
if (!tag->implicit && t->construction != tag->construction)
return 0;
return (t->asn1class == tag->tagtype && t->tagnum == tag->tagval);
}
case atype_bool:
return (t->asn1class == UNIVERSAL && t->construction == PRIMITIVE &&
t->tagnum == ASN1_BOOLEAN);
case atype_int:
case atype_uint:
case atype_int_immediate:
return (t->asn1class == UNIVERSAL && t->construction == PRIMITIVE &&
t->tagnum == ASN1_INTEGER);
default:
abort();
}
}
static krb5_error_code
decode_cntype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct cntype_info *c, void *val, size_t *count_out);
static krb5_error_code
decode_atype_to_ptr(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *basetype, void **ptr_out);
static krb5_error_code
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq,
void *val);
static krb5_error_code
decode_sequence_of(const uint8_t *asn1, size_t len,
const struct atype_info *elemtype, void **seq_out,
size_t *count_out);
/* Given the enclosing tag t, decode from asn1/len the contents of the ASN.1
* type specified by a, placing the result into val (caller-allocated). */
static krb5_error_code
decode_atype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void *val)
{
krb5_error_code ret;
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->dec != NULL);
return fn->dec(t, asn1, len, val);
}
case atype_sequence:
return decode_sequence(asn1, len, a->tinfo, val);
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
assert(ptrinfo->basetype != NULL);
if (ptr != NULL) {
/* Container was already allocated by a previous sequence field. */
return decode_atype(t, asn1, len, ptrinfo->basetype, ptr);
} else {
ret = decode_atype_to_ptr(t, asn1, len, ptrinfo->basetype, &ptr);
if (ret)
return ret;
STOREPTR(ptr, ptrinfo, val);
break;
}
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
return decode_atype(t, asn1, len, off->basetype,
(char *)val + off->dataoff);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
return decode_atype(t, asn1, len, opt->basetype, val);
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
void *dataptr = (char *)val + counted->dataoff;
size_t count;
assert(counted->basetype != NULL);
ret = decode_cntype(t, asn1, len, counted->basetype, dataptr, &count);
if (ret)
return ret;
return store_count(count, counted, val);
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
taginfo inner_tag;
const taginfo *tp = t;
const uint8_t *rem;
size_t rlen;
if (!tag->implicit) {
ret = get_tag(asn1, len, &inner_tag, &asn1, &len, &rem, &rlen);
if (ret)
return ret;
/* Note: we don't check rlen (it should be 0). */
tp = &inner_tag;
if (!check_atype_tag(tag->basetype, tp))
return ASN1_BAD_ID;
}
return decode_atype(tp, asn1, len, tag->basetype, val);
}
case atype_bool: {
intmax_t intval;
ret = k5_asn1_decode_bool(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_int: {
intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_uint: {
uintmax_t intval;
ret = k5_asn1_decode_uint(asn1, len, &intval);
if (ret)
return ret;
return store_uint(intval, a->size, val);
}
case atype_int_immediate: {
const struct immediate_info *imm = a->tinfo;
intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
if (intval != imm->val && imm->err != 0)
return imm->err;
break;
}
default:
/* Null-terminated sequence types are handled in decode_atype_to_ptr,
* since they create variable-sized objects. */
assert(a->type != atype_nullterm_sequence_of);
assert(a->type != atype_nonempty_nullterm_sequence_of);
assert(a->type > atype_min);
assert(a->type < atype_max);
abort();
}
return 0;
}
/*
* Given the enclosing tag t, decode from asn1/len the contents of the
* ASN.1 type described by c, placing the counted result into val/count_out.
* If the resulting count should be -1 (for an unknown union distinguisher),
* set *count_out to SIZE_MAX.
*/
static krb5_error_code
decode_cntype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct cntype_info *c, void *val, size_t *count_out)
{
krb5_error_code ret;
switch (c->type) {
case cntype_string: {
const struct string_info *string = c->tinfo;
assert(string->dec != NULL);
return string->dec(asn1, len, val, count_out);
}
case cntype_der:
return store_der(t, asn1, len, val, count_out);
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptrinfo = a->tinfo;
void *seq;
assert(a->type == atype_ptr);
ret = decode_sequence_of(asn1, len, ptrinfo->basetype, &seq,
count_out);
if (ret)
return ret;
STOREPTR(seq, ptrinfo, val);
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
size_t i;
for (i = 0; i < choice->n_options; i++) {
if (check_atype_tag(choice->options[i], t)) {
ret = decode_atype(t, asn1, len, choice->options[i], val);
if (ret)
return ret;
*count_out = i;
return 0;
}
}
/* SIZE_MAX will be stored as -1 in the distinguisher. If we start
* modeling non-extensible choices we should check that here. */
*count_out = SIZE_MAX;
break;
}
default:
assert(c->type > cntype_min);
assert(c->type < cntype_max);
abort();
}
return 0;
}
/* Add a null pointer to the end of a sequence. ptr is consumed on success
* (to be replaced by *ptr_out), left alone on failure. */
static krb5_error_code
null_terminate(const struct atype_info *eltinfo, void *ptr, size_t count,
void **ptr_out)
{
const struct ptr_info *ptrinfo = eltinfo->tinfo;
void *endptr;
assert(eltinfo->type == atype_ptr);
ptr = realloc(ptr, (count + 1) * eltinfo->size);
if (ptr == NULL)
return ENOMEM;
endptr = (char *)ptr + count * eltinfo->size;
STOREPTR(NULL, ptrinfo, endptr);
*ptr_out = ptr;
return 0;
}
static krb5_error_code
decode_atype_to_ptr(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void **ptr_out)
{
krb5_error_code ret;
void *ptr;
size_t count;
*ptr_out = NULL;
switch (a->type) {
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
ret = decode_sequence_of(asn1, len, a->tinfo, &ptr, &count);
if (ret)
return ret;
ret = null_terminate(a->tinfo, ptr, count, &ptr);
if (ret) {
free_sequence_of(a->tinfo, ptr, count);
return ret;
}
/* Historically we do not enforce non-emptiness of sequences when
* decoding, even when it is required by the ASN.1 type. */
break;
default:
ptr = calloc(a->size, 1);
if (ptr == NULL)
return ENOMEM;
ret = decode_atype(t, asn1, len, a, ptr);
if (ret) {
free(ptr);
return ret;
}
break;
}
*ptr_out = ptr;
return 0;
}
/* Initialize a C object when the corresponding ASN.1 type was omitted within a
* sequence. If the ASN.1 type is not optional, return ASN1_MISSING_FIELD. */
static krb5_error_code
omit_atype(const struct atype_info *a, void *val)
{
switch (a->type)
{
case atype_fn:
case atype_sequence:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
case atype_counted:
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
return ASN1_MISSING_FIELD;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
return omit_atype(ptrinfo->basetype, val);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
return omit_atype(off->basetype, (char *)val + off->dataoff);
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
return omit_atype(tag->basetype, val);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
if (opt->init != NULL)
opt->init(val);
return 0;
}
default:
abort();
}
}
/* Decode an ASN.1 sequence into a C object. */
static krb5_error_code
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq,
void *val)
{
krb5_error_code ret;
const uint8_t *contents;
size_t i, j, clen;
taginfo t;
assert(seq->n_fields > 0);
for (i = 0; i < seq->n_fields; i++) {
if (len == 0)
break;
ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len);
if (ret)
goto error;
/*
* Find the applicable sequence field. This logic is a little
* oversimplified; we could match an element to an optional extensible
* choice or optional stored-DER type when we ought to match a
* subsequent non-optional field. But it's unwise and (hopefully) very
* rare for ASN.1 modules to require such precision.
*/
for (; i < seq->n_fields; i++) {
if (check_atype_tag(seq->fields[i], &t))
break;
ret = omit_atype(seq->fields[i], val);
if (ret)
goto error;
}
/* We currently model all sequences as extensible. We should consider
* changing this before making the encoder visible to plugins. */
if (i == seq->n_fields)
break;
ret = decode_atype(&t, contents, clen, seq->fields[i], val);
if (ret)
goto error;
}
/* Initialize any fields in the C object which were not accounted for in
* the sequence. Error out if any of them aren't optional. */
for (; i < seq->n_fields; i++) {
ret = omit_atype(seq->fields[i], val);
if (ret)
goto error;
}
return 0;
error:
/* Free what we've decoded so far. Free pointers in a second pass in
* case multiple fields refer to the same pointer. */
for (j = 0; j < i; j++)
free_atype(seq->fields[j], val);
for (j = 0; j < i; j++)
free_atype_ptr(seq->fields[j], val);
return ret;
}
static krb5_error_code
decode_sequence_of(const uint8_t *asn1, size_t len,
const struct atype_info *elemtype, void **seq_out,
size_t *count_out)
{
krb5_error_code ret;
void *seq = NULL, *elem, *newseq;
const uint8_t *contents;
size_t clen, count = 0;
taginfo t;
*seq_out = NULL;
*count_out = 0;
while (len > 0) {
ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len);
if (ret)
goto error;
if (!check_atype_tag(elemtype, &t)) {
ret = ASN1_BAD_ID;
goto error;
}
newseq = realloc(seq, (count + 1) * elemtype->size);
if (newseq == NULL) {
ret = ENOMEM;
goto error;
}
seq = newseq;
elem = (char *)seq + count * elemtype->size;
memset(elem, 0, elemtype->size);
ret = decode_atype(&t, contents, clen, elemtype, elem);
if (ret)
goto error;
count++;
}
*seq_out = seq;
*count_out = count;
return 0;
error:
free_sequence_of(elemtype, seq, count);
free(seq);
return ret;
}
/* These three entry points are only needed for the kdc_req_body hack and may
* go away at some point. Define them here so we can use short names above. */
krb5_error_code
k5_asn1_encode_atype(asn1buf *buf, const void *val, const struct atype_info *a,
taginfo *tag_out)
{
return encode_atype(buf, val, a, tag_out);
}
krb5_error_code
k5_asn1_decode_atype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void *val)
{
return decode_atype(t, asn1, len, a, val);
}
krb5_error_code
k5_asn1_full_encode(const void *rep, const struct atype_info *a,
krb5_data **code_out)
{
krb5_error_code ret;
asn1buf buf;
krb5_data *d;
uint8_t *bytes;
*code_out = NULL;
if (rep == NULL)
return ASN1_MISSING_FIELD;
/* Make a first pass over rep to count the encoding size. */
buf.ptr = NULL;
buf.count = 0;
ret = encode_atype_and_tag(&buf, rep, a);
if (ret)
return ret;
/* Allocate space for the encoding. */
bytes = malloc(buf.count + 1);
if (bytes == NULL)
return ENOMEM;
bytes[buf.count] = 0;
/* Make a second pass over rep to encode it. buf.ptr moves backwards as we
* encode, and will always exactly return to the base. */
buf.ptr = bytes + buf.count;
buf.count = 0;
ret = encode_atype_and_tag(&buf, rep, a);
if (ret) {
free(bytes);
return ret;
}
assert(buf.ptr == bytes);
/* Create the output data object. */
*code_out = malloc(sizeof(*d));
if (*code_out == NULL) {
free(bytes);
return ENOMEM;
}
**code_out = make_data(bytes, buf.count);
return 0;
}
krb5_error_code
k5_asn1_full_decode(const krb5_data *code, const struct atype_info *a,
void **retrep)
{
krb5_error_code ret;
const uint8_t *contents, *remainder;
size_t clen, rlen;
taginfo t;
*retrep = NULL;
ret = get_tag((uint8_t *)code->data, code->length, &t, &contents,
&clen, &remainder, &rlen);
if (ret)
return ret;
/* rlen should be 0, but we don't check it (and due to padding in
* non-length-preserving enctypes, it will sometimes be nonzero). */
if (!check_atype_tag(a, &t))
return ASN1_BAD_ID;
return decode_atype_to_ptr(&t, contents, clen, a, retrep);
}
| /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/asn.1/asn1_encode.c */
/*
* Copyright 1994, 2008 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "asn1_encode.h"
struct asn1buf_st {
uint8_t *ptr; /* Position, moving backwards; may be NULL */
size_t count; /* Count of bytes written so far */
};
/**** Functions for encoding primitive types ****/
/* Insert one byte into buf going backwards. */
static inline void
insert_byte(asn1buf *buf, uint8_t o)
{
if (buf->ptr != NULL) {
buf->ptr--;
*buf->ptr = o;
}
buf->count++;
}
/* Insert a block of bytes into buf going backwards (but without reversing
* bytes). */
static inline void
insert_bytes(asn1buf *buf, const void *bytes, size_t len)
{
if (buf->ptr != NULL) {
memcpy(buf->ptr - len, bytes, len);
buf->ptr -= len;
}
buf->count += len;
}
void
k5_asn1_encode_bool(asn1buf *buf, intmax_t val)
{
insert_byte(buf, val ? 0xFF : 0x00);
}
void
k5_asn1_encode_int(asn1buf *buf, intmax_t val)
{
long valcopy;
int digit;
valcopy = val;
do {
digit = valcopy & 0xFF;
insert_byte(buf, digit);
valcopy = valcopy >> 8;
} while (valcopy != 0 && valcopy != ~0);
/* Make sure the high bit is of the proper signed-ness. */
if (val > 0 && (digit & 0x80) == 0x80)
insert_byte(buf, 0);
else if (val < 0 && (digit & 0x80) != 0x80)
insert_byte(buf, 0xFF);
}
void
k5_asn1_encode_uint(asn1buf *buf, uintmax_t val)
{
uintmax_t valcopy;
int digit;
valcopy = val;
do {
digit = valcopy & 0xFF;
insert_byte(buf, digit);
valcopy = valcopy >> 8;
} while (valcopy != 0);
/* Make sure the high bit is of the proper signed-ness. */
if (digit & 0x80)
insert_byte(buf, 0);
}
krb5_error_code
k5_asn1_encode_bytestring(asn1buf *buf, uint8_t *const *val, size_t len)
{
if (len > 0 && val == NULL)
return ASN1_MISSING_FIELD;
insert_bytes(buf, *val, len);
return 0;
}
krb5_error_code
k5_asn1_encode_generaltime(asn1buf *buf, time_t val)
{
struct tm *gtime, gtimebuf;
char s[16], *sp;
time_t gmt_time = val;
int len;
/*
* Time encoding: YYYYMMDDhhmmssZ
*/
if (gmt_time == 0) {
sp = "19700101000000Z";
} else {
/*
* Sanity check this just to be paranoid, as gmtime can return NULL,
* and some bogus implementations might overrun on the sprintf.
*/
#ifdef HAVE_GMTIME_R
#ifdef GMTIME_R_RETURNS_INT
if (gmtime_r(&gmt_time, &gtimebuf) != 0)
return ASN1_BAD_GMTIME;
#else
if (gmtime_r(&gmt_time, &gtimebuf) == NULL)
return ASN1_BAD_GMTIME;
#endif
#else /* HAVE_GMTIME_R */
gtime = gmtime(&gmt_time);
if (gtime == NULL)
return ASN1_BAD_GMTIME;
memcpy(&gtimebuf, gtime, sizeof(gtimebuf));
#endif /* HAVE_GMTIME_R */
gtime = &gtimebuf;
if (gtime->tm_year > 8099 || gtime->tm_mon > 11 ||
gtime->tm_mday > 31 || gtime->tm_hour > 23 ||
gtime->tm_min > 59 || gtime->tm_sec > 59)
return ASN1_BAD_GMTIME;
len = snprintf(s, sizeof(s), "%04d%02d%02d%02d%02d%02dZ",
1900 + gtime->tm_year, gtime->tm_mon + 1,
gtime->tm_mday, gtime->tm_hour,
gtime->tm_min, gtime->tm_sec);
if (SNPRINTF_OVERFLOW(len, sizeof(s)))
/* Shouldn't be possible given above tests. */
return ASN1_BAD_GMTIME;
sp = s;
}
insert_bytes(buf, sp, 15);
return 0;
}
krb5_error_code
k5_asn1_encode_bitstring(asn1buf *buf, uint8_t *const *val, size_t len)
{
insert_bytes(buf, *val, len);
insert_byte(buf, 0);
return 0;
}
/**** Functions for decoding primitive types ****/
krb5_error_code
k5_asn1_decode_bool(const uint8_t *asn1, size_t len, intmax_t *val)
{
if (len != 1)
return ASN1_BAD_LENGTH;
*val = (*asn1 != 0);
return 0;
}
/* Decode asn1/len as the contents of a DER integer, placing the signed result
* in val. */
krb5_error_code
k5_asn1_decode_int(const uint8_t *asn1, size_t len, intmax_t *val)
{
intmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
n = (asn1[0] & 0x80) ? -1 : 0;
/* Check length; allow extra octet if first octet is 0. */
if (len > sizeof(intmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0; i < len; i++)
n = (n << 8) | asn1[i];
*val = n;
return 0;
}
/* Decode asn1/len as the contents of a DER integer, placing the unsigned
* result in val. */
krb5_error_code
k5_asn1_decode_uint(const uint8_t *asn1, size_t len, uintmax_t *val)
{
uintmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
/* Check for negative values and check length. */
if ((asn1[0] & 0x80) || len > sizeof(uintmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0, n = 0; i < len; i++)
n = (n << 8) | asn1[i];
*val = n;
return 0;
}
krb5_error_code
k5_asn1_decode_bytestring(const uint8_t *asn1, size_t len,
uint8_t **str_out, size_t *len_out)
{
uint8_t *str;
*str_out = NULL;
*len_out = 0;
if (len == 0)
return 0;
str = malloc(len);
if (str == NULL)
return ENOMEM;
memcpy(str, asn1, len);
*str_out = str;
*len_out = len;
return 0;
}
krb5_error_code
k5_asn1_decode_generaltime(const uint8_t *asn1, size_t len, time_t *time_out)
{
const char *s = (char *)asn1;
struct tm ts;
time_t t;
*time_out = 0;
if (len != 15)
return ASN1_BAD_LENGTH;
/* Time encoding: YYYYMMDDhhmmssZ */
if (s[14] != 'Z')
return ASN1_BAD_FORMAT;
if (memcmp(s, "19700101000000Z", 15) == 0) {
*time_out = 0;
return 0;
}
#define c2i(c) ((c) - '0')
ts.tm_year = 1000 * c2i(s[0]) + 100 * c2i(s[1]) + 10 * c2i(s[2]) +
c2i(s[3]) - 1900;
ts.tm_mon = 10 * c2i(s[4]) + c2i(s[5]) - 1;
ts.tm_mday = 10 * c2i(s[6]) + c2i(s[7]);
ts.tm_hour = 10 * c2i(s[8]) + c2i(s[9]);
ts.tm_min = 10 * c2i(s[10]) + c2i(s[11]);
ts.tm_sec = 10 * c2i(s[12]) + c2i(s[13]);
ts.tm_isdst = -1;
t = krb5int_gmt_mktime(&ts);
if (t == -1)
return ASN1_BAD_TIMEFORMAT;
*time_out = t;
return 0;
}
/*
* Note: we return the number of bytes, not bits, in the bit string. If the
* number of bits is not a multiple of 8 we effectively round up to the next
* multiple of 8.
*/
krb5_error_code
k5_asn1_decode_bitstring(const uint8_t *asn1, size_t len,
uint8_t **bits_out, size_t *len_out)
{
uint8_t unused, *bits;
*bits_out = NULL;
*len_out = 0;
if (len == 0)
return ASN1_BAD_LENGTH;
unused = *asn1++;
len--;
if (unused > 7)
return ASN1_BAD_FORMAT;
bits = malloc(len);
if (bits == NULL)
return ENOMEM;
memcpy(bits, asn1, len);
if (len > 1)
bits[len - 1] &= (0xff << unused);
*bits_out = bits;
*len_out = len;
return 0;
}
/**** Functions for encoding and decoding tags ****/
/* Encode a DER tag into buf with the tag parameters in t and the content
* length len. */
static krb5_error_code
make_tag(asn1buf *buf, const taginfo *t, size_t len)
{
asn1_tagnum tag_copy;
size_t len_copy, oldcount;
if (t->tagnum > ASN1_TAGNUM_MAX)
return ASN1_OVERFLOW;
/* Encode the length of the content within the tag. */
if (len < 128) {
insert_byte(buf, len & 0x7F);
} else {
oldcount = buf->count;
for (len_copy = len; len_copy != 0; len_copy >>= 8)
insert_byte(buf, len_copy & 0xFF);
insert_byte(buf, 0x80 | ((buf->count - oldcount) & 0x7F));
}
/* Encode the tag and construction bit. */
if (t->tagnum < 31) {
insert_byte(buf, t->asn1class | t->construction | t->tagnum);
} else {
tag_copy = t->tagnum;
insert_byte(buf, tag_copy & 0x7F);
tag_copy >>= 7;
for (; tag_copy != 0; tag_copy >>= 7)
insert_byte(buf, 0x80 | (tag_copy & 0x7F));
insert_byte(buf, t->asn1class | t->construction | 0x1F);
}
return 0;
}
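/*
 * Illustrative sketch (not part of the library): the definite-length octets
 * make_tag produces, written here front-to-back for clarity (the real code
 * prepends them into a buffer that is filled backwards).  Lengths below 128
 * use a single octet; longer lengths use 0x80 | n followed by n base-256
 * octets, so a content length of 300 encodes as 82 01 2C.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t der_length(size_t len, uint8_t out[16])
{
    size_t n = 0, i;
    uint8_t tmp[16];

    if (len < 128) {
        out[0] = (uint8_t)len;
        return 1;
    }
    for (; len != 0; len >>= 8)          /* least significant octet first */
        tmp[n++] = (uint8_t)(len & 0xFF);
    out[0] = (uint8_t)(0x80 | n);
    for (i = 0; i < n; i++)              /* reverse into wire order */
        out[1 + i] = tmp[n - 1 - i];
    return 1 + n;
}

int main(void)
{
    uint8_t buf[16];
    size_t i, n = der_length(300, buf);

    for (i = 0; i < n; i++)
        printf("%02X ", buf[i]);         /* prints 82 01 2C */
    printf("\n");
    return 0;
}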
/*
* Read a BER tag and length from asn1/len. Place the tag parameters in
* tag_out. Set contents_out/clen_out to the octet range of the tag's
* contents, and remainder_out/rlen_out to the octet range after the end of the
* BER encoding.
*
* (krb5 ASN.1 encodings should be in DER, but for compatibility with some
* really ancient implementations we handle the indefinite length form in tags.
* However, we still insist on the primitive form of string types.)
*/
static krb5_error_code
get_tag(const uint8_t *asn1, size_t len, taginfo *tag_out,
const uint8_t **contents_out, size_t *clen_out,
const uint8_t **remainder_out, size_t *rlen_out, int recursion)
{
krb5_error_code ret;
uint8_t o;
const uint8_t *c, *p, *tag_start = asn1;
size_t clen, llen, i;
taginfo t;
*contents_out = *remainder_out = NULL;
*clen_out = *rlen_out = 0;
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
tag_out->asn1class = o & 0xC0;
tag_out->construction = o & 0x20;
if ((o & 0x1F) != 0x1F) {
tag_out->tagnum = o & 0x1F;
} else {
tag_out->tagnum = 0;
do {
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
tag_out->tagnum = (tag_out->tagnum << 7) | (o & 0x7F);
} while (o & 0x80);
}
if (len == 0)
return ASN1_OVERRUN;
o = *asn1++;
len--;
if (o == 0x80) {
/* Indefinite form (should not be present in DER, but we accept it). */
if (tag_out->construction != CONSTRUCTED)
return ASN1_MISMATCH_INDEF;
if (recursion >= 32)
return ASN1_OVERFLOW;
p = asn1;
while (!(len >= 2 && p[0] == 0 && p[1] == 0)) {
ret = get_tag(p, len, &t, &c, &clen, &p, &len, recursion + 1);
if (ret)
return ret;
}
tag_out->tag_end_len = 2;
*contents_out = asn1;
*clen_out = p - asn1;
*remainder_out = p + 2;
*rlen_out = len - 2;
} else if ((o & 0x80) == 0) {
/* Short form (first octet gives content length). */
if (o > len)
return ASN1_OVERRUN;
tag_out->tag_end_len = 0;
*contents_out = asn1;
*clen_out = o;
*remainder_out = asn1 + *clen_out;
*rlen_out = len - (*remainder_out - asn1);
} else {
/* Long form (first octet gives number of base-256 length octets). */
llen = o & 0x7F;
if (llen > len)
return ASN1_OVERRUN;
if (llen > sizeof(*clen_out))
return ASN1_OVERFLOW;
for (i = 0, clen = 0; i < llen; i++)
clen = (clen << 8) | asn1[i];
if (clen > len - llen)
return ASN1_OVERRUN;
tag_out->tag_end_len = 0;
*contents_out = asn1 + llen;
*clen_out = clen;
*remainder_out = *contents_out + clen;
*rlen_out = len - (*remainder_out - asn1);
}
tag_out->tag_len = *contents_out - tag_start;
return 0;
}
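/*
 * Illustrative sketch (not part of the library): hand-decoding the header
 * 30 82 01 2C with the rules get_tag implements.  0x30 is UNIVERSAL,
 * constructed, tag 16 (SEQUENCE); 0x82 selects the long length form with two
 * following octets, giving a content length of 0x012C = 300.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t hdr[] = { 0x30, 0x82, 0x01, 0x2C };
    uint8_t o = hdr[0];
    unsigned asn1class = o & 0xC0;       /* 0x00: UNIVERSAL */
    unsigned constructed = o & 0x20;     /* 0x20: constructed */
    unsigned tagnum = o & 0x1F;          /* 16: SEQUENCE */
    size_t llen = hdr[1] & 0x7F;         /* two length octets follow */
    size_t clen = 0, i;

    for (i = 0; i < llen; i++)
        clen = (clen << 8) | hdr[2 + i];
    printf("class %02X constructed %02X tag %u length %zu\n",
           asn1class, constructed, tagnum, clen);
    /* prints: class 00 constructed 20 tag 16 length 300 */
    return 0;
}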
#ifdef POINTERS_ARE_ALL_THE_SAME
#define LOADPTR(PTR, TYPE) (*(const void *const *)(PTR))
#define STOREPTR(PTR, TYPE, VAL) (*(void **)(VAL) = (PTR))
#else
#define LOADPTR(PTR, PTRINFO) \
(assert((PTRINFO)->loadptr != NULL), (PTRINFO)->loadptr(PTR))
#define STOREPTR(PTR, PTRINFO, VAL) \
(assert((PTRINFO)->storeptr != NULL), (PTRINFO)->storeptr(PTR, VAL))
#endif
static size_t
get_nullterm_sequence_len(const void *valp, const struct atype_info *seq)
{
size_t i;
const struct atype_info *a;
const struct ptr_info *ptr;
const void *elt, *eltptr;
a = seq;
i = 0;
assert(a->type == atype_ptr);
assert(seq->size != 0);
ptr = a->tinfo;
while (1) {
eltptr = (const char *)valp + i * seq->size;
elt = LOADPTR(eltptr, ptr);
if (elt == NULL)
break;
i++;
}
return i;
}
static krb5_error_code
encode_sequence_of(asn1buf *buf, size_t seqlen, const void *val,
const struct atype_info *eltinfo);
static krb5_error_code
encode_nullterm_sequence_of(asn1buf *buf, const void *val,
const struct atype_info *type, int can_be_empty)
{
size_t len = get_nullterm_sequence_len(val, type);
if (!can_be_empty && len == 0)
return ASN1_MISSING_FIELD;
return encode_sequence_of(buf, len, val, type);
}
static intmax_t
load_int(const void *val, size_t size)
{
switch (size) {
case 1: return *(int8_t *)val;
case 2: return *(int16_t *)val;
case 4: return *(int32_t *)val;
case 8: return *(int64_t *)val;
default: abort();
}
}
static uintmax_t
load_uint(const void *val, size_t size)
{
switch (size) {
case 1: return *(uint8_t *)val;
case 2: return *(uint16_t *)val;
case 4: return *(uint32_t *)val;
case 8: return *(uint64_t *)val;
default: abort();
}
}
static krb5_error_code
load_count(const void *val, const struct counted_info *counted,
size_t *count_out)
{
const void *countptr = (const char *)val + counted->lenoff;
assert(sizeof(size_t) <= sizeof(uintmax_t));
if (counted->lensigned) {
intmax_t xlen = load_int(countptr, counted->lensize);
if (xlen < 0 || (uintmax_t)xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
} else {
uintmax_t xlen = load_uint(countptr, counted->lensize);
if ((size_t)xlen != xlen || xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
}
return 0;
}
static krb5_error_code
store_int(intmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
if ((int8_t)intval != intval)
return ASN1_OVERFLOW;
*(int8_t *)val = intval;
return 0;
case 2:
if ((int16_t)intval != intval)
return ASN1_OVERFLOW;
*(int16_t *)val = intval;
return 0;
case 4:
if ((int32_t)intval != intval)
return ASN1_OVERFLOW;
*(int32_t *)val = intval;
return 0;
case 8:
if ((int64_t)intval != intval)
return ASN1_OVERFLOW;
*(int64_t *)val = intval;
return 0;
default:
abort();
}
}
static krb5_error_code
store_uint(uintmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
if ((uint8_t)intval != intval)
return ASN1_OVERFLOW;
*(uint8_t *)val = intval;
return 0;
case 2:
if ((uint16_t)intval != intval)
return ASN1_OVERFLOW;
*(uint16_t *)val = intval;
return 0;
case 4:
if ((uint32_t)intval != intval)
return ASN1_OVERFLOW;
*(uint32_t *)val = intval;
return 0;
case 8:
if ((uint64_t)intval != intval)
return ASN1_OVERFLOW;
*(uint64_t *)val = intval;
return 0;
default:
abort();
}
}
/* Store a count value in an integer field of a structure. If count is
* SIZE_MAX and the target is a signed field, store -1. */
static krb5_error_code
store_count(size_t count, const struct counted_info *counted, void *val)
{
void *countptr = (char *)val + counted->lenoff;
if (counted->lensigned) {
if (count == SIZE_MAX)
return store_int(-1, counted->lensize, countptr);
else if ((intmax_t)count < 0)
return ASN1_OVERFLOW;
else
return store_int(count, counted->lensize, countptr);
} else
return store_uint(count, counted->lensize, countptr);
}
/* Split a DER encoding into tag and contents. Insert the contents into buf,
* then return the length of the contents and the tag. */
static krb5_error_code
split_der(asn1buf *buf, uint8_t *const *der, size_t len, taginfo *tag_out)
{
krb5_error_code ret;
const uint8_t *contents, *remainder;
size_t clen, rlen;
ret = get_tag(*der, len, tag_out, &contents, &clen, &remainder, &rlen, 0);
if (ret)
return ret;
if (rlen != 0)
return ASN1_BAD_LENGTH;
insert_bytes(buf, contents, clen);
return 0;
}
/*
* Store the DER encoding given by t and asn1/len into the char * or
* uint8_t * pointed to by val. Set *count_out to the length of the
* DER encoding.
*/
static krb5_error_code
store_der(const taginfo *t, const uint8_t *asn1, size_t len, void *val,
size_t *count_out)
{
uint8_t *der;
size_t der_len;
*count_out = 0;
der_len = t->tag_len + len + t->tag_end_len;
der = malloc(der_len);
if (der == NULL)
return ENOMEM;
memcpy(der, asn1 - t->tag_len, der_len);
*(uint8_t **)val = der;
*count_out = der_len;
return 0;
}
static krb5_error_code
encode_sequence(asn1buf *buf, const void *val, const struct seq_info *seq);
static krb5_error_code
encode_cntype(asn1buf *buf, const void *val, size_t len,
const struct cntype_info *c, taginfo *tag_out);
/* Encode a value (contents only, no outer tag) according to a type, and return
* its encoded tag information. */
static krb5_error_code
encode_atype(asn1buf *buf, const void *val, const struct atype_info *a,
taginfo *tag_out)
{
krb5_error_code ret;
if (val == NULL)
return ASN1_MISSING_FIELD;
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->enc != NULL);
return fn->enc(buf, val, tag_out);
}
case atype_sequence:
assert(a->tinfo != NULL);
ret = encode_sequence(buf, val, a->tinfo);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
case atype_ptr: {
const struct ptr_info *ptr = a->tinfo;
assert(ptr->basetype != NULL);
return encode_atype(buf, LOADPTR(val, ptr), ptr->basetype, tag_out);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
return encode_atype(buf, (const char *)val + off->dataoff,
off->basetype, tag_out);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
assert(opt->is_present != NULL);
if (opt->is_present(val))
return encode_atype(buf, val, opt->basetype, tag_out);
else
return ASN1_OMITTED;
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
const void *dataptr = (const char *)val + counted->dataoff;
size_t count;
assert(counted->basetype != NULL);
ret = load_count(val, counted, &count);
if (ret)
return ret;
return encode_cntype(buf, dataptr, count, counted->basetype, tag_out);
}
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
assert(a->tinfo != NULL);
ret = encode_nullterm_sequence_of(buf, val, a->tinfo,
a->type ==
atype_nullterm_sequence_of);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
size_t oldcount = buf->count;
ret = encode_atype(buf, val, tag->basetype, tag_out);
if (ret)
return ret;
if (!tag->implicit) {
ret = make_tag(buf, tag_out, buf->count - oldcount);
if (ret)
return ret;
tag_out->construction = tag->construction;
}
tag_out->asn1class = tag->tagtype;
tag_out->tagnum = tag->tagval;
break;
}
case atype_bool:
k5_asn1_encode_bool(buf, load_int(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_BOOLEAN;
break;
case atype_int:
k5_asn1_encode_int(buf, load_int(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
case atype_uint:
k5_asn1_encode_uint(buf, load_uint(val, a->size));
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
case atype_int_immediate: {
const struct immediate_info *imm = a->tinfo;
k5_asn1_encode_int(buf, imm->val);
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = ASN1_INTEGER;
break;
}
default:
assert(a->type > atype_min);
assert(a->type < atype_max);
abort();
}
return 0;
}
static krb5_error_code
encode_atype_and_tag(asn1buf *buf, const void *val, const struct atype_info *a)
{
taginfo t;
krb5_error_code ret;
size_t oldcount = buf->count;
ret = encode_atype(buf, val, a, &t);
if (ret)
return ret;
ret = make_tag(buf, &t, buf->count - oldcount);
if (ret)
return ret;
return 0;
}
/*
* Encode an object and count according to a cntype_info structure. val is a
* pointer to the object being encoded, which in most cases is itself a
* pointer (but is a union in the cntype_choice case).
*/
static krb5_error_code
encode_cntype(asn1buf *buf, const void *val, size_t count,
const struct cntype_info *c, taginfo *tag_out)
{
krb5_error_code ret;
switch (c->type) {
case cntype_string: {
const struct string_info *string = c->tinfo;
assert(string->enc != NULL);
ret = string->enc(buf, val, count);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = PRIMITIVE;
tag_out->tagnum = string->tagval;
break;
}
case cntype_der:
return split_der(buf, val, count, tag_out);
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptr = a->tinfo;
assert(a->type == atype_ptr);
val = LOADPTR(val, ptr);
ret = encode_sequence_of(buf, count, val, ptr->basetype);
if (ret)
return ret;
tag_out->asn1class = UNIVERSAL;
tag_out->construction = CONSTRUCTED;
tag_out->tagnum = ASN1_SEQUENCE;
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
if (count >= choice->n_options)
return ASN1_MISSING_FIELD;
return encode_atype(buf, val, choice->options[count], tag_out);
}
default:
assert(c->type > cntype_min);
assert(c->type < cntype_max);
abort();
}
return 0;
}
static krb5_error_code
encode_sequence(asn1buf *buf, const void *val, const struct seq_info *seq)
{
krb5_error_code ret;
size_t i;
for (i = seq->n_fields; i > 0; i--) {
ret = encode_atype_and_tag(buf, val, seq->fields[i - 1]);
if (ret == ASN1_OMITTED)
continue;
else if (ret != 0)
return ret;
}
return 0;
}
static krb5_error_code
encode_sequence_of(asn1buf *buf, size_t seqlen, const void *val,
const struct atype_info *eltinfo)
{
krb5_error_code ret;
size_t i;
const void *eltptr;
assert(eltinfo->size != 0);
for (i = seqlen; i > 0; i--) {
eltptr = (const char *)val + (i - 1) * eltinfo->size;
ret = encode_atype_and_tag(buf, eltptr, eltinfo);
if (ret)
return ret;
}
return 0;
}
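/*
 * Both encode_sequence and encode_sequence_of walk their fields or elements
 * from last to first because, per the comments elsewhere in this file, the
 * output buffer is filled backwards and each insert prepends.  Emitting items
 * in reverse therefore leaves them in forward order in the finished encoding.
 * A minimal self-contained sketch of that prepend-in-reverse pattern (the
 * buffer here is a plain array, not the library's asn1buf):
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char out[8], *p = out + sizeof(out);     /* fill from the end backwards */
    const char *items[] = { "A", "B", "C" };
    int i;

    for (i = 3; i > 0; i--) {                /* last element first... */
        p -= strlen(items[i - 1]);
        memcpy(p, items[i - 1], strlen(items[i - 1]));
    }
    printf("%.*s\n", (int)(out + sizeof(out) - p), p);  /* ...prints ABC */
    return 0;
}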
/**** Functions for freeing C objects based on type info ****/
static void free_atype_ptr(const struct atype_info *a, void *val);
static void free_sequence(const struct seq_info *seq, void *val);
static void free_sequence_of(const struct atype_info *eltinfo, void *val,
size_t count);
static void free_cntype(const struct cntype_info *a, void *val, size_t count);
/*
* Free a C object according to a type description. Do not free pointers at
* the first level; they may be referenced by other fields of a sequence, and
* will be freed by free_atype_ptr in a second pass.
*/
static void
free_atype(const struct atype_info *a, void *val)
{
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
if (fn->free_func != NULL)
fn->free_func(val);
break;
}
case atype_sequence:
free_sequence(a->tinfo, val);
break;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
if (ptr != NULL) {
free_atype(ptrinfo->basetype, ptr);
free_atype_ptr(ptrinfo->basetype, ptr);
}
break;
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
free_atype(off->basetype, (char *)val + off->dataoff);
break;
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
free_atype(opt->basetype, val);
break;
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
void *dataptr = (char *)val + counted->dataoff;
size_t count;
if (load_count(val, counted, &count) == 0)
free_cntype(counted->basetype, dataptr, count);
break;
}
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of: {
size_t count = get_nullterm_sequence_len(val, a->tinfo);
free_sequence_of(a->tinfo, val, count);
break;
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
free_atype(tag->basetype, val);
break;
}
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
break;
default:
abort();
}
}
static void
free_atype_ptr(const struct atype_info *a, void *val)
{
switch (a->type) {
case atype_fn:
case atype_sequence:
case atype_counted:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
break;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
free(ptr);
STOREPTR(NULL, ptrinfo, val);
break;
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
free_atype_ptr(off->basetype, (char *)val + off->dataoff);
break;
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
free_atype_ptr(opt->basetype, val);
break;
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
free_atype_ptr(tag->basetype, val);
break;
}
default:
abort();
}
}
static void
free_cntype(const struct cntype_info *c, void *val, size_t count)
{
switch (c->type) {
case cntype_string:
case cntype_der:
free(*(char **)val);
*(char **)val = NULL;
break;
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptrinfo = a->tinfo;
void *seqptr = LOADPTR(val, ptrinfo);
free_sequence_of(ptrinfo->basetype, seqptr, count);
free(seqptr);
STOREPTR(NULL, ptrinfo, val);
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
if (count < choice->n_options) {
free_atype(choice->options[count], val);
free_atype_ptr(choice->options[count], val);
}
break;
}
default:
abort();
}
}
static void
free_sequence(const struct seq_info *seq, void *val)
{
size_t i;
for (i = 0; i < seq->n_fields; i++)
free_atype(seq->fields[i], val);
for (i = 0; i < seq->n_fields; i++)
free_atype_ptr(seq->fields[i], val);
}
static void
free_sequence_of(const struct atype_info *eltinfo, void *val, size_t count)
{
void *eltptr;
assert(eltinfo->size != 0);
while (count-- > 0) {
eltptr = (char *)val + count * eltinfo->size;
free_atype(eltinfo, eltptr);
free_atype_ptr(eltinfo, eltptr);
}
}
/**** Functions for decoding objects based on type info ****/
/* Return nonzero if t is an expected tag for an ASN.1 object of type a. */
static int
check_atype_tag(const struct atype_info *a, const taginfo *t)
{
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->check_tag != NULL);
return fn->check_tag(t);
}
case atype_sequence:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
return (t->asn1class == UNIVERSAL && t->construction == CONSTRUCTED &&
t->tagnum == ASN1_SEQUENCE);
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
return check_atype_tag(ptrinfo->basetype, t);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
return check_atype_tag(off->basetype, t);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
return check_atype_tag(opt->basetype, t);
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
switch (counted->basetype->type) {
case cntype_string: {
const struct string_info *string = counted->basetype->tinfo;
return (t->asn1class == UNIVERSAL &&
t->construction == PRIMITIVE &&
t->tagnum == string->tagval);
}
case cntype_seqof:
return (t->asn1class == UNIVERSAL &&
t->construction == CONSTRUCTED &&
t->tagnum == ASN1_SEQUENCE);
case cntype_der:
/*
* We treat any tag as matching a stored DER encoding. In some
* cases we know what the tag should be; in others, we truly want
* to accept any tag. If it ever becomes an issue, we could add
* optional tag info to the type and check it here.
*/
return 1;
case cntype_choice:
/*
* ASN.1 choices may or may not be extensible. For now, we treat
* all choices as extensible and match any tag. We should consider
* modeling whether choices are extensible before making the
* encoder visible to plugins.
*/
return 1;
default:
abort();
}
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
/* NOTE: Doesn't check construction bit for implicit tags. */
if (!tag->implicit && t->construction != tag->construction)
return 0;
return (t->asn1class == tag->tagtype && t->tagnum == tag->tagval);
}
case atype_bool:
return (t->asn1class == UNIVERSAL && t->construction == PRIMITIVE &&
t->tagnum == ASN1_BOOLEAN);
case atype_int:
case atype_uint:
case atype_int_immediate:
return (t->asn1class == UNIVERSAL && t->construction == PRIMITIVE &&
t->tagnum == ASN1_INTEGER);
default:
abort();
}
}
static krb5_error_code
decode_cntype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct cntype_info *c, void *val, size_t *count_out);
static krb5_error_code
decode_atype_to_ptr(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *basetype, void **ptr_out);
static krb5_error_code
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq,
void *val);
static krb5_error_code
decode_sequence_of(const uint8_t *asn1, size_t len,
const struct atype_info *elemtype, void **seq_out,
size_t *count_out);
/* Given the enclosing tag t, decode from asn1/len the contents of the ASN.1
* type specified by a, placing the result into val (caller-allocated). */
static krb5_error_code
decode_atype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void *val)
{
krb5_error_code ret;
switch (a->type) {
case atype_fn: {
const struct fn_info *fn = a->tinfo;
assert(fn->dec != NULL);
return fn->dec(t, asn1, len, val);
}
case atype_sequence:
return decode_sequence(asn1, len, a->tinfo, val);
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
void *ptr = LOADPTR(val, ptrinfo);
assert(ptrinfo->basetype != NULL);
if (ptr != NULL) {
/* Container was already allocated by a previous sequence field. */
return decode_atype(t, asn1, len, ptrinfo->basetype, ptr);
} else {
ret = decode_atype_to_ptr(t, asn1, len, ptrinfo->basetype, &ptr);
if (ret)
return ret;
STOREPTR(ptr, ptrinfo, val);
break;
}
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
assert(off->basetype != NULL);
return decode_atype(t, asn1, len, off->basetype,
(char *)val + off->dataoff);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
return decode_atype(t, asn1, len, opt->basetype, val);
}
case atype_counted: {
const struct counted_info *counted = a->tinfo;
void *dataptr = (char *)val + counted->dataoff;
size_t count;
assert(counted->basetype != NULL);
ret = decode_cntype(t, asn1, len, counted->basetype, dataptr, &count);
if (ret)
return ret;
return store_count(count, counted, val);
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
taginfo inner_tag;
const taginfo *tp = t;
const uint8_t *rem;
size_t rlen;
if (!tag->implicit) {
ret = get_tag(asn1, len, &inner_tag, &asn1, &len, &rem, &rlen, 0);
if (ret)
return ret;
/* Note: we don't check rlen (it should be 0). */
tp = &inner_tag;
if (!check_atype_tag(tag->basetype, tp))
return ASN1_BAD_ID;
}
return decode_atype(tp, asn1, len, tag->basetype, val);
}
case atype_bool: {
intmax_t intval;
ret = k5_asn1_decode_bool(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_int: {
intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_uint: {
uintmax_t intval;
ret = k5_asn1_decode_uint(asn1, len, &intval);
if (ret)
return ret;
return store_uint(intval, a->size, val);
}
case atype_int_immediate: {
const struct immediate_info *imm = a->tinfo;
intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
if (intval != imm->val && imm->err != 0)
return imm->err;
break;
}
default:
/* Null-terminated sequence types are handled in decode_atype_to_ptr,
* since they create variable-sized objects. */
assert(a->type != atype_nullterm_sequence_of);
assert(a->type != atype_nonempty_nullterm_sequence_of);
assert(a->type > atype_min);
assert(a->type < atype_max);
abort();
}
return 0;
}
/*
* Given the enclosing tag t, decode from asn1/len the contents of the
* ASN.1 type described by c, placing the counted result into val/count_out.
* If the resulting count should be -1 (for an unknown union distinguisher),
* set *count_out to SIZE_MAX.
*/
static krb5_error_code
decode_cntype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct cntype_info *c, void *val, size_t *count_out)
{
krb5_error_code ret;
switch (c->type) {
case cntype_string: {
const struct string_info *string = c->tinfo;
assert(string->dec != NULL);
return string->dec(asn1, len, val, count_out);
}
case cntype_der:
return store_der(t, asn1, len, val, count_out);
case cntype_seqof: {
const struct atype_info *a = c->tinfo;
const struct ptr_info *ptrinfo = a->tinfo;
void *seq;
assert(a->type == atype_ptr);
ret = decode_sequence_of(asn1, len, ptrinfo->basetype, &seq,
count_out);
if (ret)
return ret;
STOREPTR(seq, ptrinfo, val);
break;
}
case cntype_choice: {
const struct choice_info *choice = c->tinfo;
size_t i;
for (i = 0; i < choice->n_options; i++) {
if (check_atype_tag(choice->options[i], t)) {
ret = decode_atype(t, asn1, len, choice->options[i], val);
if (ret)
return ret;
*count_out = i;
return 0;
}
}
/* SIZE_MAX will be stored as -1 in the distinguisher. If we start
* modeling non-extensible choices we should check that here. */
*count_out = SIZE_MAX;
break;
}
default:
assert(c->type > cntype_min);
assert(c->type < cntype_max);
abort();
}
return 0;
}
/* Add a null pointer to the end of a sequence. ptr is consumed on success
* (to be replaced by *ptr_out), left alone on failure. */
static krb5_error_code
null_terminate(const struct atype_info *eltinfo, void *ptr, size_t count,
void **ptr_out)
{
const struct ptr_info *ptrinfo = eltinfo->tinfo;
void *endptr;
assert(eltinfo->type == atype_ptr);
ptr = realloc(ptr, (count + 1) * eltinfo->size);
if (ptr == NULL)
return ENOMEM;
endptr = (char *)ptr + count * eltinfo->size;
STOREPTR(NULL, ptrinfo, endptr);
*ptr_out = ptr;
return 0;
}
static krb5_error_code
decode_atype_to_ptr(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void **ptr_out)
{
krb5_error_code ret;
void *ptr;
size_t count;
*ptr_out = NULL;
switch (a->type) {
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
ret = decode_sequence_of(asn1, len, a->tinfo, &ptr, &count);
if (ret)
return ret;
ret = null_terminate(a->tinfo, ptr, count, &ptr);
if (ret) {
free_sequence_of(a->tinfo, ptr, count);
return ret;
}
/* Historically we do not enforce non-emptiness of sequences when
* decoding, even when it is required by the ASN.1 type. */
break;
default:
ptr = calloc(a->size, 1);
if (ptr == NULL)
return ENOMEM;
ret = decode_atype(t, asn1, len, a, ptr);
if (ret) {
free(ptr);
return ret;
}
break;
}
*ptr_out = ptr;
return 0;
}
/* Initialize a C object when the corresponding ASN.1 type was omitted within a
* sequence. If the ASN.1 type is not optional, return ASN1_MISSING_FIELD. */
static krb5_error_code
omit_atype(const struct atype_info *a, void *val)
{
switch (a->type)
{
case atype_fn:
case atype_sequence:
case atype_nullterm_sequence_of:
case atype_nonempty_nullterm_sequence_of:
case atype_counted:
case atype_bool:
case atype_int:
case atype_uint:
case atype_int_immediate:
return ASN1_MISSING_FIELD;
case atype_ptr: {
const struct ptr_info *ptrinfo = a->tinfo;
return omit_atype(ptrinfo->basetype, val);
}
case atype_offset: {
const struct offset_info *off = a->tinfo;
return omit_atype(off->basetype, (char *)val + off->dataoff);
}
case atype_tagged_thing: {
const struct tagged_info *tag = a->tinfo;
return omit_atype(tag->basetype, val);
}
case atype_optional: {
const struct optional_info *opt = a->tinfo;
if (opt->init != NULL)
opt->init(val);
return 0;
}
default:
abort();
}
}
/* Decode an ASN.1 sequence into a C object. */
static krb5_error_code
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq,
void *val)
{
krb5_error_code ret;
const uint8_t *contents;
size_t i, j, clen;
taginfo t;
assert(seq->n_fields > 0);
for (i = 0; i < seq->n_fields; i++) {
if (len == 0)
break;
ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len, 0);
if (ret)
goto error;
/*
* Find the applicable sequence field. This logic is a little
* oversimplified; we could match an element to an optional extensible
* choice or optional stored-DER type when we ought to match a
* subsequent non-optional field. But it's unwise and (hopefully) very
* rare for ASN.1 modules to require such precision.
*/
for (; i < seq->n_fields; i++) {
if (check_atype_tag(seq->fields[i], &t))
break;
ret = omit_atype(seq->fields[i], val);
if (ret)
goto error;
}
/* We currently model all sequences as extensible. We should consider
* changing this before making the encoder visible to plugins. */
if (i == seq->n_fields)
break;
ret = decode_atype(&t, contents, clen, seq->fields[i], val);
if (ret)
goto error;
}
/* Initialize any fields in the C object which were not accounted for in
* the sequence. Error out if any of them aren't optional. */
for (; i < seq->n_fields; i++) {
ret = omit_atype(seq->fields[i], val);
if (ret)
goto error;
}
return 0;
error:
/* Free what we've decoded so far. Free pointers in a second pass in
* case multiple fields refer to the same pointer. */
for (j = 0; j < i; j++)
free_atype(seq->fields[j], val);
for (j = 0; j < i; j++)
free_atype_ptr(seq->fields[j], val);
return ret;
}
static krb5_error_code
decode_sequence_of(const uint8_t *asn1, size_t len,
const struct atype_info *elemtype, void **seq_out,
size_t *count_out)
{
krb5_error_code ret;
void *seq = NULL, *elem, *newseq;
const uint8_t *contents;
size_t clen, count = 0;
taginfo t;
*seq_out = NULL;
*count_out = 0;
while (len > 0) {
ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len, 0);
if (ret)
goto error;
if (!check_atype_tag(elemtype, &t)) {
ret = ASN1_BAD_ID;
goto error;
}
newseq = realloc(seq, (count + 1) * elemtype->size);
if (newseq == NULL) {
ret = ENOMEM;
goto error;
}
seq = newseq;
elem = (char *)seq + count * elemtype->size;
memset(elem, 0, elemtype->size);
ret = decode_atype(&t, contents, clen, elemtype, elem);
if (ret)
goto error;
count++;
}
*seq_out = seq;
*count_out = count;
return 0;
error:
free_sequence_of(elemtype, seq, count);
free(seq);
return ret;
}
/* These three entry points are only needed for the kdc_req_body hack and may
* go away at some point. Define them here so we can use short names above. */
krb5_error_code
k5_asn1_encode_atype(asn1buf *buf, const void *val, const struct atype_info *a,
taginfo *tag_out)
{
return encode_atype(buf, val, a, tag_out);
}
krb5_error_code
k5_asn1_decode_atype(const taginfo *t, const uint8_t *asn1, size_t len,
const struct atype_info *a, void *val)
{
return decode_atype(t, asn1, len, a, val);
}
krb5_error_code
k5_asn1_full_encode(const void *rep, const struct atype_info *a,
krb5_data **code_out)
{
krb5_error_code ret;
asn1buf buf;
krb5_data *d;
uint8_t *bytes;
*code_out = NULL;
if (rep == NULL)
return ASN1_MISSING_FIELD;
/* Make a first pass over rep to count the encoding size. */
buf.ptr = NULL;
buf.count = 0;
ret = encode_atype_and_tag(&buf, rep, a);
if (ret)
return ret;
/* Allocate space for the encoding. */
bytes = malloc(buf.count + 1);
if (bytes == NULL)
return ENOMEM;
bytes[buf.count] = 0;
/* Make a second pass over rep to encode it. buf.ptr moves backwards as we
* encode, and will always exactly return to the base. */
buf.ptr = bytes + buf.count;
buf.count = 0;
ret = encode_atype_and_tag(&buf, rep, a);
if (ret) {
free(bytes);
return ret;
}
assert(buf.ptr == bytes);
/* Create the output data object. */
*code_out = malloc(sizeof(*d));
if (*code_out == NULL) {
free(bytes);
return ENOMEM;
}
**code_out = make_data(bytes, buf.count);
return 0;
}
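/*
 * Minimal sketch (not part of the library) of the two-pass pattern used
 * above: the first pass runs with ptr == NULL and only advances count; the
 * second pass starts ptr at base + count and writes each prepended octet just
 * below it, so it ends exactly at the base.  The toy buffer below assumes the
 * same ptr/count behavior the comments above describe for asn1buf/insert_byte.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { uint8_t *ptr; size_t count; };

/* Stand-in for insert_byte: prepend one octet, or just count it. */
static void put_byte(struct buf *b, uint8_t o)
{
    if (b->ptr != NULL)
        *--b->ptr = o;
    b->count++;
}

static void emit(struct buf *b)              /* stand-in for the encoder */
{
    put_byte(b, 0x2A);                       /* INTEGER content 42 ... */
    put_byte(b, 0x01);                       /* ... length 1 ... */
    put_byte(b, 0x02);                       /* ... INTEGER tag, prepended last */
}

int main(void)
{
    struct buf b = { NULL, 0 };
    uint8_t *bytes;
    size_t i;

    emit(&b);                                /* pass 1: sizing only */
    bytes = malloc(b.count);
    if (bytes == NULL)
        return 1;
    b.ptr = bytes + b.count;                 /* pass 2: fill backwards */
    b.count = 0;
    emit(&b);
    /* b.ptr has walked back to bytes; the buffer now holds 02 01 2A. */
    for (i = 0; i < b.count; i++)
        printf("%02X ", bytes[i]);
    printf("\n");
    free(bytes);
    return 0;
}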
krb5_error_code
k5_asn1_full_decode(const krb5_data *code, const struct atype_info *a,
void **retrep)
{
krb5_error_code ret;
const uint8_t *contents, *remainder;
size_t clen, rlen;
taginfo t;
*retrep = NULL;
ret = get_tag((uint8_t *)code->data, code->length, &t, &contents,
&clen, &remainder, &rlen, 0);
if (ret)
return ret;
/* rlen should be 0, but we don't check it (and due to padding in
* non-length-preserving enctypes, it will sometimes be nonzero). */
if (!check_atype_tag(a, &t))
return ASN1_BAD_ID;
return decode_atype_to_ptr(&t, contents, clen, a, retrep);
}
|
6,878,359,330,264 | #ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
}
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
extern struct group_info init_groups;
#define INIT_STRUCT_PID { \
.count = ATOMIC_INIT(1), \
.tasks = { \
{ .first = &init_task.pids[PIDTYPE_PID].node }, \
{ .first = &init_task.pids[PIDTYPE_PGID].node }, \
{ .first = &init_task.pids[PIDTYPE_SID].node }, \
}, \
.level = 0, \
.numbers = { { \
.nr = 0, \
.ns = &init_pid_ns, \
.pid_chain = { .next = NULL, .pprev = NULL }, \
}, } \
}
#define INIT_PID_LINK(type) \
{ \
.node = { \
.next = NULL, \
.pprev = &init_struct_pid.tasks[type].first, \
}, \
.pid = &init_struct_pid, \
}
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
.loginuid = -1, \
.sessionid = -1,
#else
#define INIT_IDS
#endif
/*
* Because of the reduced scope of CAP_SETPCAP when filesystem
* capabilities are in effect, it is safe to allow CAP_SETPCAP to
* be available in the default configuration.
*/
# define CAP_INIT_BSET CAP_FULL_SET
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special = 0, \
.rcu_blocked_node = NULL, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
extern struct cred init_cred;
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
.state = 0, \
.stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.nsproxy = &init_nsproxy, \
.pending = { \
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
LIST_HEAD_INIT(cpu_timers[0]), \
LIST_HEAD_INIT(cpu_timers[1]), \
LIST_HEAD_INIT(cpu_timers[2]), \
}
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))
#endif
| #ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
}
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
extern struct group_info init_groups;
#define INIT_STRUCT_PID { \
.count = ATOMIC_INIT(1), \
.tasks = { \
{ .first = &init_task.pids[PIDTYPE_PID].node }, \
{ .first = &init_task.pids[PIDTYPE_PGID].node }, \
{ .first = &init_task.pids[PIDTYPE_SID].node }, \
}, \
.level = 0, \
.numbers = { { \
.nr = 0, \
.ns = &init_pid_ns, \
.pid_chain = { .next = NULL, .pprev = NULL }, \
}, } \
}
#define INIT_PID_LINK(type) \
{ \
.node = { \
.next = NULL, \
.pprev = &init_struct_pid.tasks[type].first, \
}, \
.pid = &init_struct_pid, \
}
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
.loginuid = -1, \
.sessionid = -1,
#else
#define INIT_IDS
#endif
/*
* Because of the reduced scope of CAP_SETPCAP when filesystem
* capabilities are in effect, it is safe to allow CAP_SETPCAP to
* be available in the default configuration.
*/
# define CAP_INIT_BSET CAP_FULL_SET
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special = 0, \
.rcu_blocked_node = NULL, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
extern struct cred init_cred;
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
.state = 0, \
.stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.nsproxy = &init_nsproxy, \
.pending = { \
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
LIST_HEAD_INIT(cpu_timers[0]), \
LIST_HEAD_INIT(cpu_timers[1]), \
LIST_HEAD_INIT(cpu_timers[2]), \
}
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))
#endif
|
204,243,626,059,347 | #ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
}
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
extern struct group_info init_groups;
#define INIT_STRUCT_PID { \
.count = ATOMIC_INIT(1), \
.tasks = { \
{ .first = &init_task.pids[PIDTYPE_PID].node }, \
{ .first = &init_task.pids[PIDTYPE_PGID].node }, \
{ .first = &init_task.pids[PIDTYPE_SID].node }, \
}, \
.level = 0, \
.numbers = { { \
.nr = 0, \
.ns = &init_pid_ns, \
.pid_chain = { .next = NULL, .pprev = NULL }, \
}, } \
}
#define INIT_PID_LINK(type) \
{ \
.node = { \
.next = NULL, \
.pprev = &init_struct_pid.tasks[type].first, \
}, \
.pid = &init_struct_pid, \
}
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
.loginuid = -1, \
.sessionid = -1,
#else
#define INIT_IDS
#endif
/*
* Because of the reduced scope of CAP_SETPCAP when filesystem
* capabilities are in effect, it is safe to allow CAP_SETPCAP to
* be available in the default configuration.
*/
# define CAP_INIT_BSET CAP_FULL_SET
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special = 0, \
.rcu_blocked_node = NULL, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
extern struct cred init_cred;
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
.state = 0, \
.stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.nsproxy = &init_nsproxy, \
.pending = { \
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
LIST_HEAD_INIT(cpu_timers[0]), \
LIST_HEAD_INIT(cpu_timers[1]), \
LIST_HEAD_INIT(cpu_timers[2]), \
}
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))
#endif
| #ifndef _LINUX__INIT_TASK_H
#define _LINUX__INIT_TASK_H
#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/securebits.h>
#include <net/net_namespace.h>
extern struct files_struct init_files;
extern struct fs_struct init_fs;
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
.shared_pending = { \
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
.signal = {{0}}}, \
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
}
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
}
extern struct group_info init_groups;
#define INIT_STRUCT_PID { \
.count = ATOMIC_INIT(1), \
.tasks = { \
{ .first = NULL }, \
{ .first = NULL }, \
{ .first = NULL }, \
}, \
.level = 0, \
.numbers = { { \
.nr = 0, \
.ns = &init_pid_ns, \
.pid_chain = { .next = NULL, .pprev = NULL }, \
}, } \
}
#define INIT_PID_LINK(type) \
{ \
.node = { \
.next = NULL, \
.pprev = NULL, \
}, \
.pid = &init_struct_pid, \
}
#ifdef CONFIG_AUDITSYSCALL
#define INIT_IDS \
.loginuid = -1, \
.sessionid = -1,
#else
#define INIT_IDS
#endif
/*
* Because of the reduced scope of CAP_SETPCAP when filesystem
* capabilities are in effect, it is safe to allow CAP_SETPCAP to
* be available in the default configuration.
*/
# define CAP_INIT_BSET CAP_FULL_SET
#ifdef CONFIG_TREE_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special = 0, \
.rcu_blocked_node = NULL, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
extern struct cred init_cred;
#ifdef CONFIG_PERF_EVENTS
# define INIT_PERF_EVENTS(tsk) \
.perf_event_mutex = \
__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
#else
# define INIT_PERF_EVENTS(tsk)
#endif
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
.state = 0, \
.stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.se = { \
.group_node = LIST_HEAD_INIT(tsk.se.group_node), \
}, \
.rt = { \
.run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
.time_slice = HZ, \
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
.signal = &init_signals, \
.sighand = &init_sighand, \
.nsproxy = &init_nsproxy, \
.pending = { \
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.thread_group = LIST_HEAD_INIT(tsk.thread_group), \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
}
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
LIST_HEAD_INIT(cpu_timers[0]), \
LIST_HEAD_INIT(cpu_timers[1]), \
LIST_HEAD_INIT(cpu_timers[2]), \
}
/* Attach to the init_task data structure for proper alignment */
#define __init_task_data __attribute__((__section__(".data.init_task")))
#endif
|
97,473,752,816,411 | /*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
int cpu;
int total = 0;
for_each_possible_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}
static inline void free_thread_info(struct thread_info *ti)
{
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;
/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
static inline void put_signal_struct(struct signal_struct *sig)
{
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
exit_creds(tsk);
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
/*
* macro override instead of weak attribute alias, to workaround
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
*/
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
arch_task_cache_init();
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
* of memory.
*/
max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
/*
* we need to allow at least 20 threads to boot a system
*/
if(max_threads < 20)
max_threads = 20;
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
}
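/*
 * Worked example of the sizing above (illustrative numbers, not from any
 * particular machine): with PAGE_SIZE = 4 KiB and THREAD_SIZE = 8 KiB,
 * THREAD_SIZE / PAGE_SIZE = 2, so a system with 1 GiB of memory
 * (mempages = 262144) gets max_threads = 262144 / (8 * 2) = 16384, and the
 * RLIMIT_NPROC default becomes half of that, 8192.
 */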
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
struct task_struct *src)
{
*dst = *src;
return 0;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
unsigned long *stackend;
int err;
prepare_to_copy(orig);
tsk = alloc_task_struct();
if (!tsk)
return NULL;
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
err = arch_dup_task_struct(tsk, orig);
if (err)
goto out;
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
if (err)
goto out;
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
account_kernel_stack(ti, 1);
return tsk;
out:
free_thread_info(ti);
free_task_struct(tsk);
return NULL;
}
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
/*
* Not linked in yet - no deadlock potential:
*/
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
goto out;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
mm->total_vm -= pages;
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-pages);
continue;
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory(len))
goto fail_nomem;
charge = len;
}
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = file->f_mapping;
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
spin_lock(&mapping->i_mmap_lock);
if (tmp->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
tmp->vm_truncate_count = mpnt->vm_truncate_count;
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_prio_tree_add(tmp, mpnt);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
/*
* Clear hugetlb-related page reserves for children. This only
* affects MAP_PRIVATE mappings. Faults generated by the child
* are not guaranteed to succeed, even if read-only
*/
if (is_vm_hugetlb_page(tmp))
reset_vma_resv_huge_pages(tmp);
/*
* Link in the new vma and copy the page table entries.
*/
*pprev = tmp;
pprev = &tmp->vm_next;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;
mm->map_count++;
retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
if (retval)
goto out;
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
mm->pgd = pgd_alloc(mm);
if (unlikely(!mm->pgd))
return -ENOMEM;
return 0;
}
static inline void mm_free_pgd(struct mm_struct * mm)
{
pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
{
default_dump_filter =
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
MMF_DUMP_FILTER_MASK;
return 1;
}
__setup("coredump_filter=", coredump_filter_setup);
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
spin_lock_init(&mm->ioctx_lock);
INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
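/*
 * The MMF_INIT_MASK bits (dumpable state and coredump filter) are
 * inherited from the parent mm when one exists; otherwise the boot-time
 * default_dump_filter is used.
 */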
mm->flags = (current->mm) ?
(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
mmu_notifier_mm_init(mm);
return mm;
}
free_mm(mm);
return NULL;
}
/*
* Allocate and initialize an mm_struct.
*/
struct mm_struct * mm_alloc(void)
{
struct mm_struct * mm;
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm, current);
}
return mm;
}
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
void mmput(struct mm_struct *mm)
{
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm, or if the task is a kernel thread
* (PF_KTHREAD set, e.g. one that has transiently adopted a user mm with
* use_mm to do its AIO). Otherwise bumps the mm use count and returns a
* reference to the mm. The caller must release it via mmput() after use.
* Typically used by /proc and ptrace.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
struct mm_struct *mm;
task_lock(task);
mm = task->mm;
if (mm) {
if (task->flags & PF_KTHREAD)
mm = NULL;
else
atomic_inc(&mm->mm_users);
}
task_unlock(task);
return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error, success, whatever.
*
* mm_release is called after a mm_struct has been removed
* from the current process.
*
* This difference is important for error handling, when we
* only half set up a mm_struct for a new process and need to restore
* the old one. Because we mmput the new mm_struct before
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
struct completion *vfork_done = tsk->vfork_done;
/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
}
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
}
#endif
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
#endif
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);
/* notify parent sleeping on vfork() */
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
* the value intact in a core dump, and to save the unnecessary
* trouble otherwise. Userland only wants this done for a sys_exit.
*/
if (tsk->clear_child_tid) {
if (!(tsk->flags & PF_SIGNALED) &&
atomic_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
*/
put_user(0, tsk->clear_child_tid);
sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
1, NULL, NULL, 0);
}
tsk->clear_child_tid = NULL;
}
}
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
if (!oldmm)
return NULL;
mm = allocate_mm();
if (!mm)
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
goto fail_nocontext;
dup_mm_exe_file(oldmm, mm);
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
if (mm->binfmt && !try_module_get(mm->binfmt->module))
goto free_pt;
return mm;
free_pt:
/* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL;
mmput(mm);
fail_nomem:
return NULL;
fail_nocontext:
/*
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
*/
mm_free_pgd(mm);
free_mm(mm);
return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
struct mm_struct * mm, *oldmm;
int retval;
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
/*
* Are we cloning a kernel thread?
*
* We need to steal an active VM for that.
*/
oldmm = current->mm;
if (!oldmm)
return 0;
if (clone_flags & CLONE_VM) {
atomic_inc(&oldmm->mm_users);
mm = oldmm;
goto good_mm;
}
retval = -ENOMEM;
mm = dup_mm(tsk);
if (!mm)
goto fail_nomem;
good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
fail_nomem:
return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
write_lock(&fs->lock);
if (fs->in_exec) {
write_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
write_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
struct files_struct *oldf, *newf;
int error = 0;
/*
* A background process may not have any files ...
*/
oldf = current->files;
if (!oldf)
goto out;
if (clone_flags & CLONE_FILES) {
atomic_inc(&oldf->count);
goto out;
}
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
tsk->files = newf;
error = 0;
out:
return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
struct io_context *ioc = current->io_context;
if (!ioc)
return 0;
/*
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
tsk->io_context = ioc_task_link(ioc);
if (unlikely(!tsk->io_context))
return -ENOMEM;
} else if (ioprio_valid(ioc->ioprio)) {
tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
if (unlikely(!tsk->io_context))
return -ENOMEM;
tsk->io_context->ioprio = ioc->ioprio;
}
#endif
return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(¤t->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
if (atomic_dec_and_test(&sighand->count))
kmem_cache_free(sighand_cachep, sighand);
}
/*
* Initialize POSIX timer handling for a thread group.
*/
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
unsigned long cpu_limit;
/* Thread group counters. */
thread_group_cputime_init(sig);
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD)
return 0;
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE;
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
posix_cpu_timers_init_group(sig);
tty_audit_fork(sig);
sig->oom_adj = current->signal->oom_adj;
return 0;
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
* Initialize POSIX timer handling for a single task.
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
tsk->cputime_expires.prof_exp = cputime_zero;
tsk->cputime_expires.virt_exp = cputime_zero;
tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
/*
* Siblings of global init remain as zombies on exit since they are
* not reaped by their parent (swapper). To solve this and to avoid
* multi-rooted process trees, prevent global and container-inits
* from creating siblings.
*/
if ((clone_flags & CLONE_PARENT) &&
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
rcu_copy_process(p);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif
p->default_timer_slack_ns = current->timer_slack_ns;
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
retval = perf_event_init_task(p);
if (retval)
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
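/*
 * do_fork() passes pid == NULL, so a fresh pid is allocated from the
 * child's pid namespace here; only fork_idle() passes &init_struct_pid
 * and skips the allocation.
 */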
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_pid;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing and stepping should be turned off in the
* child regardless of CLONE_PTRACE.
*/
user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* ok, now we should be set up.. */
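/*
 * Threads (CLONE_THREAD) report no exit signal (-1); other children use
 * the signal encoded in the low CSIGNAL bits of clone_flags.
 */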
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(¤t->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* its process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(¤t->signal->live);
atomic_inc(¤t->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(¤t->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
perf_event_fork(p);
return p;
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
if (p->io_context)
exit_io_context(p);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
perf_event_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
}
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
}
struct task_struct * __cpuinit fork_idle(int cpu)
{
struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
&init_struct_pid, 0);
if (!IS_ERR(task))
init_idle(task, cpu);
return task;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
long nr;
/*
* Do some preliminary argument and permissions checking before we
* actually start allocating stuff
*/
if (clone_flags & CLONE_NEWUSER) {
if (clone_flags & CLONE_THREAD)
return -EINVAL;
/* hopefully this check will go away when userns support is
* complete
*/
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
!capable(CAP_SETGID))
return -EPERM;
}
/*
* We hope to recycle these flags after 2.6.26
*/
if (unlikely(clone_flags & CLONE_STOPPED)) {
static int __read_mostly count = 100;
if (count > 0 && printk_ratelimit()) {
char comm[TASK_COMM_LEN];
count--;
printk(KERN_INFO "fork(): process `%s' used deprecated "
"clone flags 0x%lx\n",
get_task_comm(comm, current),
clone_flags & CLONE_STOPPED);
}
}
/*
* When called from kernel_thread, don't do user tracing stuff.
*/
if (likely(user_mode(regs)))
trace = tracehook_prepare_clone(clone_flags);
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
*/
if (!IS_ERR(p)) {
struct completion vfork;
trace_sched_process_fork(current, p);
nr = task_pid_vnr(p);
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
}
audit_finish_fork(p);
tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
* hasn't gotten to tracehook_report_clone() yet. Now we
* clear it and set the child going.
*/
p->flags &= ~PF_STARTING;
if (unlikely(clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
__set_task_state(p, TASK_STOPPED);
} else {
wake_up_new_task(p, clone_flags);
}
tracehook_report_clone_complete(trace, regs,
clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
tracehook_report_vfork_done(p, nr);
}
} else {
nr = PTR_ERR(p);
}
return nr;
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
struct sighand_struct *sighand = data;
spin_lock_init(&sighand->siglock);
init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
static void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
* unshare vm.
*/
if (*flags_ptr & CLONE_THREAD)
*flags_ptr |= CLONE_VM;
/*
* If unsharing vm, must also unshare signal handlers.
*/
if (*flags_ptr & CLONE_VM)
*flags_ptr |= CLONE_SIGHAND;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (*flags_ptr & CLONE_NEWNS)
*flags_ptr |= CLONE_FS;
}
/*
* Unsharing of tasks created with CLONE_THREAD is not supported yet
*/
static int unshare_thread(unsigned long unshare_flags)
{
if (unshare_flags & CLONE_THREAD)
return -EINVAL;
return 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
if (!(unshare_flags & CLONE_FS) || !fs)
return 0;
/* don't need the lock here; in the worst case we'll do a useless copy */
if (fs->users == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
if (!*new_fsp)
return -ENOMEM;
return 0;
}
/*
* Unsharing of sighand is not supported yet
*/
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
struct sighand_struct *sigh = current->sighand;
if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
return -EINVAL;
else
return 0;
}
/*
* Unshare vm if it is being shared
*/
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
struct mm_struct *mm = current->mm;
if ((unshare_flags & CLONE_VM) &&
(mm && atomic_read(&mm->mm_users) > 1)) {
return -EINVAL;
}
return 0;
}
/*
* Unshare file descriptor table if it is being shared
*/
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
struct files_struct *fd = current->files;
int error = 0;
if ((unshare_flags & CLONE_FILES) &&
(fd && atomic_read(&fd->count) > 1)) {
*new_fdp = dup_fd(fd, &error);
if (!*new_fdp)
return error;
}
return 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
struct sighand_struct *new_sigh = NULL;
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
check_unshare_flags(&unshare_flags);
/* Return -EINVAL for all unsupported flags */
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
* namespace are unreachable.
*/
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
do_sysvsem = 1;
if ((err = unshare_thread(unshare_flags)))
goto bad_unshare_out;
if ((err = unshare_fs(unshare_flags, &new_fs)))
goto bad_unshare_cleanup_thread;
if ((err = unshare_sighand(unshare_flags, &new_sigh)))
goto bad_unshare_cleanup_fs;
if ((err = unshare_vm(unshare_flags, &new_mm)))
goto bad_unshare_cleanup_sigh;
if ((err = unshare_fd(unshare_flags, &new_fd)))
goto bad_unshare_cleanup_vm;
if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
new_fs)))
goto bad_unshare_cleanup_fd;
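/*
 * Everything that needed copying has been copied; install the new copies
 * on current under task_lock(), and leave the displaced originals in the
 * new_* pointers so the fall-through cleanup labels drop any references
 * that are no longer needed.
 */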
if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
*/
exit_sem(current);
}
if (new_nsproxy) {
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
if (new_fs) {
fs = current->fs;
write_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
write_unlock(&fs->lock);
}
if (new_mm) {
mm = current->mm;
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
activate_mm(active_mm, new_mm);
new_mm = mm;
}
if (new_fd) {
fd = current->files;
current->files = new_fd;
new_fd = fd;
}
task_unlock(current);
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
bad_unshare_cleanup_vm:
if (new_mm)
mmput(new_mm);
bad_unshare_cleanup_sigh:
if (new_sigh)
if (atomic_dec_and_test(&new_sigh->count))
kmem_cache_free(sighand_cachep, new_sigh);
bad_unshare_cleanup_fs:
if (new_fs)
free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
return err;
}
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
int unshare_files(struct files_struct **displaced)
{
struct task_struct *task = current;
struct files_struct *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, &copy);
if (error || !copy) {
*displaced = NULL;
return error;
}
*displaced = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
return 0;
}
| /*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
int cpu;
int total = 0;
for_each_possible_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}
static inline void free_thread_info(struct thread_info *ti)
{
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;
/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
static inline void put_signal_struct(struct signal_struct *sig)
{
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
exit_creds(tsk);
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
/*
* macro override instead of weak attribute alias, to workaround
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
*/
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
arch_task_cache_init();
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
* of memory.
*/
max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
/*
* we need to allow at least 20 threads to boot a system
*/
if (max_threads < 20)
max_threads = 20;
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
}
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
struct task_struct *src)
{
*dst = *src;
return 0;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
unsigned long *stackend;
int err;
prepare_to_copy(orig);
tsk = alloc_task_struct();
if (!tsk)
return NULL;
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
err = arch_dup_task_struct(tsk, orig);
if (err)
goto out;
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
if (err)
goto out;
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
account_kernel_stack(ti, 1);
return tsk;
out:
free_thread_info(ti);
free_task_struct(tsk);
return NULL;
}
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
/*
* Not linked in yet - no deadlock potential:
*/
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
goto out;
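/*
 * Walk the parent's VMA list in address order, duplicating each VMA into
 * the child and copying the corresponding page table entries.
 */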
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
mm->total_vm -= pages;
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-pages);
continue;
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory(len))
goto fail_nomem;
charge = len;
}
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = file->f_mapping;
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
spin_lock(&mapping->i_mmap_lock);
if (tmp->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
tmp->vm_truncate_count = mpnt->vm_truncate_count;
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_prio_tree_add(tmp, mpnt);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
/*
* Clear hugetlb-related page reserves for children. This only
* affects MAP_PRIVATE mappings. Faults generated by the child
* are not guaranteed to succeed, even if read-only
*/
if (is_vm_hugetlb_page(tmp))
reset_vma_resv_huge_pages(tmp);
/*
* Link in the new vma and copy the page table entries.
*/
*pprev = tmp;
pprev = &tmp->vm_next;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;
mm->map_count++;
retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
if (retval)
goto out;
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
mm->pgd = pgd_alloc(mm);
if (unlikely(!mm->pgd))
return -ENOMEM;
return 0;
}
static inline void mm_free_pgd(struct mm_struct * mm)
{
pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
{
default_dump_filter =
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
MMF_DUMP_FILTER_MASK;
return 1;
}
__setup("coredump_filter=", coredump_filter_setup);
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
spin_lock_init(&mm->ioctx_lock);
INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
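/*
 * The MMF_INIT_MASK bits (dumpable state and coredump filter) are
 * inherited from the parent mm when one exists; otherwise the boot-time
 * default_dump_filter is used.
 */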
mm->flags = (current->mm) ?
(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
mmu_notifier_mm_init(mm);
return mm;
}
free_mm(mm);
return NULL;
}
/*
* Allocate and initialize an mm_struct.
*/
struct mm_struct * mm_alloc(void)
{
struct mm_struct * mm;
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm, current);
}
return mm;
}
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
void mmput(struct mm_struct *mm)
{
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm, or if the task is a kernel thread
* (PF_KTHREAD set, e.g. one that has transiently adopted a user mm with
* use_mm to do its AIO). Otherwise bumps the mm use count and returns a
* reference to the mm. The caller must release it via mmput() after use.
* Typically used by /proc and ptrace.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
struct mm_struct *mm;
task_lock(task);
mm = task->mm;
if (mm) {
if (task->flags & PF_KTHREAD)
mm = NULL;
else
atomic_inc(&mm->mm_users);
}
task_unlock(task);
return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error, success, whatever.
*
* mm_release is called after a mm_struct has been removed
* from the current process.
*
* This difference is important for error handling, when we
* only half set up a mm_struct for a new process and need to restore
* the old one. Because we mmput the new mm_struct before
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
struct completion *vfork_done = tsk->vfork_done;
/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
}
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
}
#endif
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
#endif
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);
/* notify parent sleeping on vfork() */
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
* the value intact in a core dump, and to save the unnecessary
* trouble otherwise. Userland only wants this done for a sys_exit.
*/
if (tsk->clear_child_tid) {
if (!(tsk->flags & PF_SIGNALED) &&
atomic_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
*/
put_user(0, tsk->clear_child_tid);
sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
1, NULL, NULL, 0);
}
tsk->clear_child_tid = NULL;
}
}
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
if (!oldmm)
return NULL;
mm = allocate_mm();
if (!mm)
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
goto fail_nocontext;
dup_mm_exe_file(oldmm, mm);
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
if (mm->binfmt && !try_module_get(mm->binfmt->module))
goto free_pt;
return mm;
free_pt:
/* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL;
mmput(mm);
fail_nomem:
return NULL;
fail_nocontext:
/*
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
*/
mm_free_pgd(mm);
free_mm(mm);
return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
struct mm_struct * mm, *oldmm;
int retval;
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
/*
* Are we cloning a kernel thread?
*
* We need to steal an active VM for that.
*/
oldmm = current->mm;
if (!oldmm)
return 0;
if (clone_flags & CLONE_VM) {
atomic_inc(&oldmm->mm_users);
mm = oldmm;
goto good_mm;
}
retval = -ENOMEM;
mm = dup_mm(tsk);
if (!mm)
goto fail_nomem;
good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
fail_nomem:
return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
write_lock(&fs->lock);
if (fs->in_exec) {
write_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
write_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
struct files_struct *oldf, *newf;
int error = 0;
/*
* A background process may not have any files ...
*/
oldf = current->files;
if (!oldf)
goto out;
if (clone_flags & CLONE_FILES) {
atomic_inc(&oldf->count);
goto out;
}
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
tsk->files = newf;
error = 0;
out:
return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
struct io_context *ioc = current->io_context;
if (!ioc)
return 0;
/*
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
tsk->io_context = ioc_task_link(ioc);
if (unlikely(!tsk->io_context))
return -ENOMEM;
} else if (ioprio_valid(ioc->ioprio)) {
tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
if (unlikely(!tsk->io_context))
return -ENOMEM;
tsk->io_context->ioprio = ioc->ioprio;
}
#endif
return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(¤t->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
if (atomic_dec_and_test(&sighand->count))
kmem_cache_free(sighand_cachep, sighand);
}
/*
* Initialize POSIX timer handling for a thread group.
*/
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
unsigned long cpu_limit;
/* Thread group counters. */
thread_group_cputime_init(sig);
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD)
return 0;
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE;
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
posix_cpu_timers_init_group(sig);
tty_audit_fork(sig);
sig->oom_adj = current->signal->oom_adj;
return 0;
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
* Initialize POSIX timer handling for a single task.
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
tsk->cputime_expires.prof_exp = cputime_zero;
tsk->cputime_expires.virt_exp = cputime_zero;
tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
/*
* Siblings of global init remain as zombies on exit since they are
* not reaped by their parent (swapper). To solve this and to avoid
* multi-rooted process trees, prevent global and container-inits
* from creating siblings.
*/
if ((clone_flags & CLONE_PARENT) &&
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
rcu_copy_process(p);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif
p->default_timer_slack_ns = current->timer_slack_ns;
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
retval = perf_event_init_task(p);
if (retval)
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_pid;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing and stepping should be turned off in the
* child regardless of CLONE_PTRACE.
*/
user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We don't wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* its process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
perf_event_fork(p);
return p;
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
if (p->io_context)
exit_io_context(p);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
perf_event_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
}
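/*
 * Sketch of the flag constraints checked at the top of copy_process(),
 * as seen from user space (assumption: raw clone(2) via the glibc
 * wrapper, child on a caller-supplied stack; helper names are
 * hypothetical). CLONE_THREAD without CLONE_SIGHAND, or CLONE_SIGHAND
 * without CLONE_VM, fails with EINVAL before anything is allocated:
 *
 *	#define _GNU_SOURCE
 *	#include <errno.h>
 *	#include <sched.h>
 *
 *	static int child_fn(void *arg) { return 0; }
 *
 *	static int probe_flag_check(char *stack_top)
 *	{
 *		int ret = clone(child_fn, stack_top, CLONE_THREAD, NULL);
 *		return (ret == -1 && errno == EINVAL) ? 0 : -1;
 *	}
 */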
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
}
static inline void init_idle_pids(struct pid_link *links)
{
enum pid_type type;
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
INIT_HLIST_NODE(&links[type].node); /* not really needed */
links[type].pid = &init_struct_pid;
}
}
struct task_struct * __cpuinit fork_idle(int cpu)
{
struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
&init_struct_pid, 0);
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
init_idle(task, cpu);
}
return task;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
long nr;
/*
* Do some preliminary argument and permissions checking before we
* actually start allocating stuff
*/
if (clone_flags & CLONE_NEWUSER) {
if (clone_flags & CLONE_THREAD)
return -EINVAL;
/* hopefully this check will go away when userns support is
* complete
*/
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
!capable(CAP_SETGID))
return -EPERM;
}
/*
* We hope to recycle these flags after 2.6.26
*/
if (unlikely(clone_flags & CLONE_STOPPED)) {
static int __read_mostly count = 100;
if (count > 0 && printk_ratelimit()) {
char comm[TASK_COMM_LEN];
count--;
printk(KERN_INFO "fork(): process `%s' used deprecated "
"clone flags 0x%lx\n",
get_task_comm(comm, current),
clone_flags & CLONE_STOPPED);
}
}
/*
* When called from kernel_thread, don't do user tracing stuff.
*/
if (likely(user_mode(regs)))
trace = tracehook_prepare_clone(clone_flags);
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
/*
* Do this prior to waking up the new thread - the thread pointer
* might become invalid after that point, if the thread exits quickly.
*/
if (!IS_ERR(p)) {
struct completion vfork;
trace_sched_process_fork(current, p);
nr = task_pid_vnr(p);
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
}
audit_finish_fork(p);
tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
* hasn't gotten to tracehook_report_clone() yet. Now we
* clear it and set the child going.
*/
p->flags &= ~PF_STARTING;
if (unlikely(clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
__set_task_state(p, TASK_STOPPED);
} else {
wake_up_new_task(p, clone_flags);
}
tracehook_report_clone_complete(trace, regs,
clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
tracehook_report_vfork_done(p, nr);
}
} else {
nr = PTR_ERR(p);
}
return nr;
}
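/*
 * How the fork-family entry points funnel into do_fork() (sketch,
 * modeled on the x86 wrappers of this era; other architectures differ
 * in how they pass pt_regs and the new stack pointer):
 *
 *	sys_fork:   do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *	sys_vfork:  do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *			    regs->sp, regs, 0, NULL, NULL);
 *	sys_clone:  do_fork(clone_flags, newsp, regs, 0,
 *			    parent_tidptr, child_tidptr);
 */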
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
struct sighand_struct *sighand = data;
spin_lock_init(&sighand->siglock);
init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
static void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
* unshare vm.
*/
if (*flags_ptr & CLONE_THREAD)
*flags_ptr |= CLONE_VM;
/*
* If unsharing vm, must also unshare signal handlers.
*/
if (*flags_ptr & CLONE_VM)
*flags_ptr |= CLONE_SIGHAND;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (*flags_ptr & CLONE_NEWNS)
*flags_ptr |= CLONE_FS;
}
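/*
 * Worked example of the implications above: a caller passing only
 * CLONE_THREAD ends up with all three bits set, so the unshare_*()
 * helpers below always see a self-consistent flag set:
 *
 *	unsigned long flags = CLONE_THREAD;
 *	check_unshare_flags(&flags);
 *	flags == (CLONE_THREAD | CLONE_VM | CLONE_SIGHAND)
 *
 * and CLONE_NEWNS alone becomes CLONE_NEWNS | CLONE_FS.
 */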
/*
* Unsharing of tasks created with CLONE_THREAD is not supported yet
*/
static int unshare_thread(unsigned long unshare_flags)
{
if (unshare_flags & CLONE_THREAD)
return -EINVAL;
return 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
if (!(unshare_flags & CLONE_FS) || !fs)
return 0;
/* no lock needed here; in the worst case we'll do a useless copy */
if (fs->users == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
if (!*new_fsp)
return -ENOMEM;
return 0;
}
/*
* Unsharing of sighand is not supported yet
*/
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
struct sighand_struct *sigh = current->sighand;
if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
return -EINVAL;
else
return 0;
}
/*
* Unshare vm if it is being shared
*/
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
struct mm_struct *mm = current->mm;
if ((unshare_flags & CLONE_VM) &&
(mm && atomic_read(&mm->mm_users) > 1)) {
return -EINVAL;
}
return 0;
}
/*
* Unshare file descriptor table if it is being shared
*/
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
struct files_struct *fd = current->files;
int error = 0;
if ((unshare_flags & CLONE_FILES) &&
(fd && atomic_read(&fd->count) > 1)) {
*new_fdp = dup_fd(fd, &error);
if (!*new_fdp)
return error;
}
return 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
struct sighand_struct *new_sigh = NULL;
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
check_unshare_flags(&unshare_flags);
/* Return -EINVAL for all unsupported flags */
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
* namespace are unreachable.
*/
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
do_sysvsem = 1;
if ((err = unshare_thread(unshare_flags)))
goto bad_unshare_out;
if ((err = unshare_fs(unshare_flags, &new_fs)))
goto bad_unshare_cleanup_thread;
if ((err = unshare_sighand(unshare_flags, &new_sigh)))
goto bad_unshare_cleanup_fs;
if ((err = unshare_vm(unshare_flags, &new_mm)))
goto bad_unshare_cleanup_sigh;
if ((err = unshare_fd(unshare_flags, &new_fd)))
goto bad_unshare_cleanup_vm;
if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
new_fs)))
goto bad_unshare_cleanup_fd;
if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
*/
exit_sem(current);
}
if (new_nsproxy) {
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
if (new_fs) {
fs = current->fs;
write_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
write_unlock(&fs->lock);
}
if (new_mm) {
mm = current->mm;
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
activate_mm(active_mm, new_mm);
new_mm = mm;
}
if (new_fd) {
fd = current->files;
current->files = new_fd;
new_fd = fd;
}
task_unlock(current);
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
bad_unshare_cleanup_vm:
if (new_mm)
mmput(new_mm);
bad_unshare_cleanup_sigh:
if (new_sigh)
if (atomic_dec_and_test(&new_sigh->count))
kmem_cache_free(sighand_cachep, new_sigh);
bad_unshare_cleanup_fs:
if (new_fs)
free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
return err;
}
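/*
 * Minimal user-space sketch (assumptions: hosted build with glibc's
 * unshare(2) wrapper, and CAP_SYS_ADMIN for the namespace flag): give
 * the caller a private mount namespace, which by the CLONE_NEWNS =>
 * CLONE_FS coupling enforced above also unshares its fs_struct.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (unshare(CLONE_NEWNS) == -1) {
 *			perror("unshare");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */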
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
int unshare_files(struct files_struct **displaced)
{
struct task_struct *task = current;
struct files_struct *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, &copy);
if (error || !copy) {
*displaced = NULL;
return error;
}
*displaced = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
return 0;
}
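/*
 * Typical use of unshare_files(), sketched after the exec path that
 * motivated it (see fs/exec.c; shown here only as an illustration):
 *
 *	struct files_struct *displaced;
 *	int retval = unshare_files(&displaced);
 *	if (retval)
 *		goto out;
 *	...
 *	if (displaced)
 *		put_files_struct(displaced);
 */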
|
172,426,466,034,780 | /*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
/*
* Counters protected by write_lock_irq(&tasklist_lock)
*/
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
int cpu;
int total = 0;
for_each_possible_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}
static inline void free_thread_info(struct thread_info *ti)
{
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;
/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
static inline void put_signal_struct(struct signal_struct *sig)
{
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
exit_creds(tsk);
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
/*
* macro override instead of weak attribute alias, to workaround
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
*/
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
arch_task_cache_init();
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
* of memory.
*/
max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
/*
* we need to allow at least 20 threads to boot a system
*/
if (max_threads < 20)
max_threads = 20;
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
}
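/*
 * Worked example of the max_threads sizing above (assumed numbers):
 * with 4 KiB pages and an order-1 kernel stack (THREAD_SIZE = 8 KiB),
 * THREAD_SIZE / PAGE_SIZE = 2, so a machine with mempages = 1 << 20
 * (4 GiB of memory) gets
 *
 *	max_threads = (1 << 20) / (8 * 2) = 65536
 *
 * threads, whose stacks alone would occupy 65536 * 8 KiB = 512 MiB,
 * one eighth of memory, keeping thread structures well under the
 * half-of-memory bound the comment above aims for.
 */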
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
struct task_struct *src)
{
*dst = *src;
return 0;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
unsigned long *stackend;
int err;
prepare_to_copy(orig);
tsk = alloc_task_struct();
if (!tsk)
return NULL;
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
err = arch_dup_task_struct(tsk, orig);
if (err)
goto out;
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
if (err)
goto out;
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
account_kernel_stack(ti, 1);
return tsk;
out:
free_thread_info(ti);
free_task_struct(tsk);
return NULL;
}
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
/*
* Not linked in yet - no deadlock potential:
*/
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
goto out;
prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
mm->total_vm -= pages;
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-pages);
continue;
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory(len))
goto fail_nomem;
charge = len;
}
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = file->f_mapping;
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
spin_lock(&mapping->i_mmap_lock);
if (tmp->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
tmp->vm_truncate_count = mpnt->vm_truncate_count;
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_prio_tree_add(tmp, mpnt);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
/*
* Clear hugetlb-related page reserves for children. This only
* affects MAP_PRIVATE mappings. Faults generated by the child
* are not guaranteed to succeed, even if read-only
*/
if (is_vm_hugetlb_page(tmp))
reset_vma_resv_huge_pages(tmp);
/*
* Link in the new vma and copy the page table entries.
*/
*pprev = tmp;
pprev = &tmp->vm_next;
tmp->vm_prev = prev;
prev = tmp;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;
mm->map_count++;
retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
if (retval)
goto out;
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
mm->pgd = pgd_alloc(mm);
if (unlikely(!mm->pgd))
return -ENOMEM;
return 0;
}
static inline void mm_free_pgd(struct mm_struct * mm)
{
pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
{
default_dump_filter =
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
MMF_DUMP_FILTER_MASK;
return 1;
}
__setup("coredump_filter=", coredump_filter_setup);
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
spin_lock_init(&mm->ioctx_lock);
INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
mm->flags = (current->mm) ?
(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
atomic_set(&mm->oom_disable_count, 0);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
mmu_notifier_mm_init(mm);
return mm;
}
free_mm(mm);
return NULL;
}
/*
* Allocate and initialize an mm_struct.
*/
struct mm_struct * mm_alloc(void)
{
struct mm_struct * mm;
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm, current);
}
return mm;
}
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
void mmput(struct mm_struct *mm)
{
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
* this kernel workthread has transiently adopted a user mm with use_mm,
* to do its AIO); otherwise returns the mm with its use count bumped.
* The caller must release the mm via mmput()
* after use. Typically used by /proc and ptrace.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
struct mm_struct *mm;
task_lock(task);
mm = task->mm;
if (mm) {
if (task->flags & PF_KTHREAD)
mm = NULL;
else
atomic_inc(&mm->mm_users);
}
task_unlock(task);
return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
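/*
 * Usage sketch for get_task_mm() (hypothetical helper, not part of this
 * file): take a counted reference, read what you need, then drop it
 * with mmput(). This is the pattern /proc and ptrace follow.
 */
static inline unsigned long example_total_vm_pages(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long pages = 0;

	if (mm) {
		pages = mm->total_vm;	/* mm can't go away while we hold the reference */
		mmput(mm);
	}
	return pages;
}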
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* whether on error or success.
*
* mm_release is called after a mm_struct has been removed
* from the current process.
*
* This difference is important for error handling, when we
* only half set up a mm_struct for a new process and need to restore
* the old one. Because we mmput the new mm_struct before
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
struct completion *vfork_done = tsk->vfork_done;
/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
}
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
}
#endif
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
#endif
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);
/* notify parent sleeping on vfork() */
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
* the value intact in a core dump, and to save the unnecessary
* trouble otherwise. Userland only wants this done for a sys_exit.
*/
if (tsk->clear_child_tid) {
if (!(tsk->flags & PF_SIGNALED) &&
atomic_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
*/
put_user(0, tsk->clear_child_tid);
sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
1, NULL, NULL, 0);
}
tsk->clear_child_tid = NULL;
}
}
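/*
 * User-space view of the clear_child_tid handshake above (sketch,
 * assuming a glibc-style threading library): the library clones each
 * thread with CLONE_CHILD_CLEARTID pointing at the thread's tid field,
 * and pthread_join() simply futex-waits on that field:
 *
 *	while (pd->tid != 0)
 *		futex(&pd->tid, FUTEX_WAIT, pd->tid, NULL);
 *
 * When the thread exits, the put_user(0, ...) and FUTEX_WAKE issued
 * here let the joiner proceed.
 */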
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
if (!oldmm)
return NULL;
mm = allocate_mm();
if (!mm)
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
goto fail_nocontext;
dup_mm_exe_file(oldmm, mm);
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
if (mm->binfmt && !try_module_get(mm->binfmt->module))
goto free_pt;
return mm;
free_pt:
/* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL;
mmput(mm);
fail_nomem:
return NULL;
fail_nocontext:
/*
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
*/
mm_free_pgd(mm);
free_mm(mm);
return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
struct mm_struct * mm, *oldmm;
int retval;
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
/*
* Are we cloning a kernel thread?
*
* We need to steal an active VM for that..
*/
oldmm = current->mm;
if (!oldmm)
return 0;
if (clone_flags & CLONE_VM) {
atomic_inc(&oldmm->mm_users);
mm = oldmm;
goto good_mm;
}
retval = -ENOMEM;
mm = dup_mm(tsk);
if (!mm)
goto fail_nomem;
good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
atomic_inc(&mm->oom_disable_count);
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
fail_nomem:
return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
spin_lock(&fs->lock);
if (fs->in_exec) {
spin_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
spin_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
struct files_struct *oldf, *newf;
int error = 0;
/*
* A background process may not have any files ...
*/
oldf = current->files;
if (!oldf)
goto out;
if (clone_flags & CLONE_FILES) {
atomic_inc(&oldf->count);
goto out;
}
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
tsk->files = newf;
error = 0;
out:
return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
struct io_context *ioc = current->io_context;
if (!ioc)
return 0;
/*
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
tsk->io_context = ioc_task_link(ioc);
if (unlikely(!tsk->io_context))
return -ENOMEM;
} else if (ioprio_valid(ioc->ioprio)) {
tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
if (unlikely(!tsk->io_context))
return -ENOMEM;
tsk->io_context->ioprio = ioc->ioprio;
}
#endif
return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
if (atomic_dec_and_test(&sighand->count))
kmem_cache_free(sighand_cachep, sighand);
}
/*
* Initialize POSIX timer handling for a thread group.
*/
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
unsigned long cpu_limit;
/* Thread group counters. */
thread_group_cputime_init(sig);
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD)
return 0;
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE;
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
posix_cpu_timers_init_group(sig);
tty_audit_fork(sig);
sig->oom_adj = current->signal->oom_adj;
sig->oom_score_adj = current->signal->oom_score_adj;
mutex_init(&sig->cred_guard_mutex);
return 0;
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
* Initialize POSIX timer handling for a single task.
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
tsk->cputime_expires.prof_exp = cputime_zero;
tsk->cputime_expires.virt_exp = cputime_zero;
tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
/*
* Siblings of global init remain as zombies on exit since they are
* not reaped by their parent (swapper). To solve this and to avoid
* multi-rooted process trees, prevent global and container-inits
* from creating siblings.
*/
if ((clone_flags & CLONE_PARENT) &&
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
rcu_copy_process(p);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif
p->default_timer_slack_ns = current->timer_slack_ns;
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
retval = perf_event_init_task(p);
if (retval)
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_pid;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing and stepping should be turned off in the
* child regardless of CLONE_PTRACE.
*/
user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We don't wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* its process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
perf_event_fork(p);
return p;
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
if (p->io_context)
exit_io_context(p);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm) {
task_lock(p);
if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
atomic_dec(&p->mm->oom_disable_count);
task_unlock(p);
mmput(p->mm);
}
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
perf_event_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
}
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
}
static inline void init_idle_pids(struct pid_link *links)
{
enum pid_type type;
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
INIT_HLIST_NODE(&links[type].node); /* not really needed */
links[type].pid = &init_struct_pid;
}
}
struct task_struct * __cpuinit fork_idle(int cpu)
{
struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
&init_struct_pid, 0);
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
init_idle(task, cpu);
}
return task;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
long nr;
/*
* Do some preliminary argument and permissions checking before we
* actually start allocating stuff
*/
if (clone_flags & CLONE_NEWUSER) {
if (clone_flags & CLONE_THREAD)
return -EINVAL;
/* hopefully this check will go away when userns support is
* complete
*/
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
!capable(CAP_SETGID))
return -EPERM;
}
/*
* We hope to recycle these flags after 2.6.26
*/
if (unlikely(clone_flags & CLONE_STOPPED)) {
static int __read_mostly count = 100;
if (count > 0 && printk_ratelimit()) {
char comm[TASK_COMM_LEN];
count--;
printk(KERN_INFO "fork(): process `%s' used deprecated "
"clone flags 0x%lx\n",
get_task_comm(comm, current),
clone_flags & CLONE_STOPPED);
}
}
/*
* When called from kernel_thread, don't do user tracing stuff.
*/
if (likely(user_mode(regs)))
trace = tracehook_prepare_clone(clone_flags);
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
/*
* Do this prior to waking up the new thread - the thread pointer
* might become invalid after that point, if the thread exits quickly.
*/
if (!IS_ERR(p)) {
struct completion vfork;
trace_sched_process_fork(current, p);
nr = task_pid_vnr(p);
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
}
audit_finish_fork(p);
tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
* hasn't gotten to tracehook_report_clone() yet. Now we
* clear it and set the child going.
*/
p->flags &= ~PF_STARTING;
if (unlikely(clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
__set_task_state(p, TASK_STOPPED);
} else {
wake_up_new_task(p, clone_flags);
}
tracehook_report_clone_complete(trace, regs,
clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
tracehook_report_vfork_done(p, nr);
}
} else {
nr = PTR_ERR(p);
}
return nr;
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
struct sighand_struct *sighand = data;
spin_lock_init(&sighand->siglock);
init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
static void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
* unshare vm.
*/
if (*flags_ptr & CLONE_THREAD)
*flags_ptr |= CLONE_VM;
/*
* If unsharing vm, must also unshare signal handlers.
*/
if (*flags_ptr & CLONE_VM)
*flags_ptr |= CLONE_SIGHAND;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (*flags_ptr & CLONE_NEWNS)
*flags_ptr |= CLONE_FS;
}
/*
* Unsharing of tasks created with CLONE_THREAD is not supported yet
*/
static int unshare_thread(unsigned long unshare_flags)
{
if (unshare_flags & CLONE_THREAD)
return -EINVAL;
return 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
if (!(unshare_flags & CLONE_FS) || !fs)
return 0;
/* no lock needed here; in the worst case we'll do a useless copy */
if (fs->users == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
if (!*new_fsp)
return -ENOMEM;
return 0;
}
/*
* Unsharing of sighand is not supported yet
*/
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
struct sighand_struct *sigh = current->sighand;
if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
return -EINVAL;
else
return 0;
}
/*
* Unshare vm if it is being shared
*/
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
struct mm_struct *mm = current->mm;
if ((unshare_flags & CLONE_VM) &&
(mm && atomic_read(&mm->mm_users) > 1)) {
return -EINVAL;
}
return 0;
}
/*
* Unshare file descriptor table if it is being shared
*/
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
struct files_struct *fd = current->files;
int error = 0;
if ((unshare_flags & CLONE_FILES) &&
(fd && atomic_read(&fd->count) > 1)) {
*new_fdp = dup_fd(fd, &error);
if (!*new_fdp)
return error;
}
return 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
struct sighand_struct *new_sigh = NULL;
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
check_unshare_flags(&unshare_flags);
/* Return -EINVAL for all unsupported flags */
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
* namespace are unreachable.
*/
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
do_sysvsem = 1;
if ((err = unshare_thread(unshare_flags)))
goto bad_unshare_out;
if ((err = unshare_fs(unshare_flags, &new_fs)))
goto bad_unshare_cleanup_thread;
if ((err = unshare_sighand(unshare_flags, &new_sigh)))
goto bad_unshare_cleanup_fs;
if ((err = unshare_vm(unshare_flags, &new_mm)))
goto bad_unshare_cleanup_sigh;
if ((err = unshare_fd(unshare_flags, &new_fd)))
goto bad_unshare_cleanup_vm;
if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
new_fs)))
goto bad_unshare_cleanup_fd;
if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
*/
exit_sem(current);
}
if (new_nsproxy) {
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
if (new_fs) {
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
spin_unlock(&fs->lock);
}
if (new_mm) {
mm = current->mm;
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
atomic_dec(&mm->oom_disable_count);
atomic_inc(&new_mm->oom_disable_count);
}
activate_mm(active_mm, new_mm);
new_mm = mm;
}
if (new_fd) {
fd = current->files;
current->files = new_fd;
new_fd = fd;
}
task_unlock(current);
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
bad_unshare_cleanup_vm:
if (new_mm)
mmput(new_mm);
bad_unshare_cleanup_sigh:
if (new_sigh)
if (atomic_dec_and_test(&new_sigh->count))
kmem_cache_free(sighand_cachep, new_sigh);
bad_unshare_cleanup_fs:
if (new_fs)
free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
return err;
}
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
int unshare_files(struct files_struct **displaced)
{
struct task_struct *task = current;
struct files_struct *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, &copy);
if (error || !copy) {
*displaced = NULL;
return error;
}
*displaced = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
return 0;
}
| /*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */
int nr_processes(void)
{
int cpu;
int total = 0;
for_each_possible_cpu(cpu)
total += per_cpu(process_counts, cpu);
return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}
static inline void free_thread_info(struct thread_info *ti)
{
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;
/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;
/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
static void account_kernel_stack(struct thread_info *ti, int account)
{
struct zone *zone = page_zone(virt_to_page(ti));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
void free_task(struct task_struct *tsk)
{
prop_local_destroy_single(&tsk->dirties);
account_kernel_stack(tsk->stack, -1);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
kmem_cache_free(signal_cachep, sig);
}
static inline void put_signal_struct(struct signal_struct *sig)
{
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
exit_creds(tsk);
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
if (!profile_handoff_task(tsk))
free_task(tsk);
}
/*
* macro override instead of weak attribute alias, to workaround
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
*/
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
arch_task_cache_init();
/*
* The default maximum number of threads is set to a safe
* value: the thread structures can take up at most half
* of memory.
*/
max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
/*
* we need to allow at least 20 threads to boot a system
*/
if(max_threads < 20)
max_threads = 20;
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
init_task.signal->rlim[RLIMIT_SIGPENDING] =
init_task.signal->rlim[RLIMIT_NPROC];
}
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
struct task_struct *src)
{
*dst = *src;
return 0;
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
unsigned long *stackend;
int err;
prepare_to_copy(orig);
tsk = alloc_task_struct();
if (!tsk)
return NULL;
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
err = arch_dup_task_struct(tsk, orig);
if (err)
goto out;
tsk->stack = ti;
err = prop_local_init_single(&tsk->dirties);
if (err)
goto out;
setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
stackend = end_of_stack(tsk);
*stackend = STACK_END_MAGIC; /* for overflow detection */
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
#endif
/* One for us, one for whoever does the "release_task()" (usually parent) */
atomic_set(&tsk->usage,2);
atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
tsk->splice_pipe = NULL;
account_kernel_stack(ti, 1);
return tsk;
out:
free_thread_info(ti);
free_task_struct(tsk);
return NULL;
}
#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
struct mempolicy *pol;
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
/*
* Not linked in yet - no deadlock potential:
*/
down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
mm->locked_vm = 0;
mm->mmap = NULL;
mm->mmap_cache = NULL;
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
goto out;
prev = NULL;
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
mm->total_vm -= pages;
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-pages);
continue;
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory(len))
goto fail_nomem;
charge = len;
}
tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
struct address_space *mapping = file->f_mapping;
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
spin_lock(&mapping->i_mmap_lock);
if (tmp->vm_flags & VM_SHARED)
mapping->i_mmap_writable++;
tmp->vm_truncate_count = mpnt->vm_truncate_count;
flush_dcache_mmap_lock(mapping);
/* insert tmp into the share list, just after mpnt */
vma_prio_tree_add(tmp, mpnt);
flush_dcache_mmap_unlock(mapping);
spin_unlock(&mapping->i_mmap_lock);
}
/*
* Clear hugetlb-related page reserves for children. This only
* affects MAP_PRIVATE mappings. Faults generated by the child
* are not guaranteed to succeed, even if read-only
*/
if (is_vm_hugetlb_page(tmp))
reset_vma_resv_huge_pages(tmp);
/*
* Link in the new vma and copy the page table entries.
*/
*pprev = tmp;
pprev = &tmp->vm_next;
tmp->vm_prev = prev;
prev = tmp;
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;
mm->map_count++;
retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
if (retval)
goto out;
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
retval = 0;
out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
goto out;
}
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
mm->pgd = pgd_alloc(mm);
if (unlikely(!mm->pgd))
return -ENOMEM;
return 0;
}
static inline void mm_free_pgd(struct mm_struct * mm)
{
pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm) (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
static int __init coredump_filter_setup(char *s)
{
default_dump_filter =
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
MMF_DUMP_FILTER_MASK;
return 1;
}
__setup("coredump_filter=", coredump_filter_setup);
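/*
 * Illustrative userspace sketch (an assumption for context, not part of this
 * file): besides the "coredump_filter=" boot parameter handled above, the
 * same dump-filter bits are typically reachable per process through
 * /proc/<pid>/coredump_filter, and mm_init() below propagates them to
 * children via MMF_INIT_MASK. The value 0x33 is only an example.
 */
#include <stdio.h>
int main(void)
{
	FILE *f = fopen("/proc/self/coredump_filter", "w");
	if (f) {
		fprintf(f, "0x33\n");	/* choose which mapping types end up in core dumps */
		fclose(f);
	}
	return 0;
}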
#include <linux/init_task.h>
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
spin_lock_init(&mm->ioctx_lock);
INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
mm->flags = (current->mm) ?
(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
mm->nr_ptes = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
mm_init_aio(mm);
mm_init_owner(mm, p);
atomic_set(&mm->oom_disable_count, 0);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
mmu_notifier_mm_init(mm);
return mm;
}
free_mm(mm);
return NULL;
}
/*
* Allocate and initialize an mm_struct.
*/
struct mm_struct * mm_alloc(void)
{
struct mm_struct * mm;
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
mm = mm_init(mm, current);
}
return mm;
}
/*
* Called when the last reference to the mm
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
void mmput(struct mm_struct *mm)
{
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
ksm_exit(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
* this kernel workthread has transiently adopted a user mm with use_mm,
* to do its AIO) is not set and if so returns a reference to it, after
* bumping up the use count. User must release the mm via mmput()
* after use. Typically used by /proc and ptrace.
*/
struct mm_struct *get_task_mm(struct task_struct *task)
{
struct mm_struct *mm;
task_lock(task);
mm = task->mm;
if (mm) {
if (task->flags & PF_KTHREAD)
mm = NULL;
else
atomic_inc(&mm->mm_users);
}
task_unlock(task);
return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
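/*
 * Hedged usage sketch (the helper name is hypothetical, not part of this
 * file): get_task_mm() returns NULL for kernel threads and otherwise bumps
 * mm_users, so every successful call has to be paired with mmput().
 */
static unsigned long example_task_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total = 0;

	if (mm) {
		total = mm->total_vm;	/* safe: we hold an mm_users reference */
		mmput(mm);
	}
	return total;
}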
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
* error success whatever.
*
* mm_release is called after a mm_struct has been removed
* from the current process.
*
* This difference is important for error handling, when we
* only half set up a mm_struct for a new process and need to restore
* the old one. Because we mmput the new mm_struct before
* restoring the old one. . .
* Eric Biederman 10 January 1998
*/
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
struct completion *vfork_done = tsk->vfork_done;
/* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
}
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
}
#endif
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
#endif
/* Get rid of any cached register state */
deactivate_mm(tsk, mm);
/* notify parent sleeping on vfork() */
if (vfork_done) {
tsk->vfork_done = NULL;
complete(vfork_done);
}
/*
* If we're exiting normally, clear a user-space tid field if
* requested. We leave this alone when dying by signal, to leave
* the value intact in a core dump, and to save the unnecessary
* trouble otherwise. Userland only wants this done for a sys_exit.
*/
if (tsk->clear_child_tid) {
if (!(tsk->flags & PF_SIGNALED) &&
atomic_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
*/
put_user(0, tsk->clear_child_tid);
sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
1, NULL, NULL, 0);
}
tsk->clear_child_tid = NULL;
}
}
/*
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
if (!oldmm)
return NULL;
mm = allocate_mm();
if (!mm)
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
goto fail_nocontext;
dup_mm_exe_file(oldmm, mm);
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
if (mm->binfmt && !try_module_get(mm->binfmt->module))
goto free_pt;
return mm;
free_pt:
/* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL;
mmput(mm);
fail_nomem:
return NULL;
fail_nocontext:
/*
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
*/
mm_free_pgd(mm);
free_mm(mm);
return NULL;
}
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
struct mm_struct * mm, *oldmm;
int retval;
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
/*
* Are we cloning a kernel thread?
*
* We need to steal a active VM for that..
*/
oldmm = current->mm;
if (!oldmm)
return 0;
if (clone_flags & CLONE_VM) {
atomic_inc(&oldmm->mm_users);
mm = oldmm;
goto good_mm;
}
retval = -ENOMEM;
mm = dup_mm(tsk);
if (!mm)
goto fail_nomem;
good_mm:
/* Initializing for Swap token stuff */
mm->token_priority = 0;
mm->last_interval = 0;
if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
atomic_inc(&mm->oom_disable_count);
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
fail_nomem:
return retval;
}
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
/* tsk->fs is already what we want */
spin_lock(&fs->lock);
if (fs->in_exec) {
spin_unlock(&fs->lock);
return -EAGAIN;
}
fs->users++;
spin_unlock(&fs->lock);
return 0;
}
tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
}
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
struct files_struct *oldf, *newf;
int error = 0;
/*
* A background process may not have any files ...
*/
oldf = current->files;
if (!oldf)
goto out;
if (clone_flags & CLONE_FILES) {
atomic_inc(&oldf->count);
goto out;
}
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
tsk->files = newf;
error = 0;
out:
return error;
}
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
struct io_context *ioc = current->io_context;
if (!ioc)
return 0;
/*
* Share io context with parent, if CLONE_IO is set
*/
if (clone_flags & CLONE_IO) {
tsk->io_context = ioc_task_link(ioc);
if (unlikely(!tsk->io_context))
return -ENOMEM;
} else if (ioprio_valid(ioc->ioprio)) {
tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
if (unlikely(!tsk->io_context))
return -ENOMEM;
tsk->io_context->ioprio = ioc->ioprio;
}
#endif
return 0;
}
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
atomic_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
atomic_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
if (atomic_dec_and_test(&sighand->count))
kmem_cache_free(sighand_cachep, sighand);
}
/*
* Initialize POSIX timer handling for a thread group.
*/
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
unsigned long cpu_limit;
/* Thread group counters. */
thread_group_cputime_init(sig);
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD)
return 0;
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
init_waitqueue_head(&sig->wait_chldexit);
if (clone_flags & CLONE_NEWPID)
sig->flags |= SIGNAL_UNKILLABLE;
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
posix_cpu_timers_init_group(sig);
tty_audit_fork(sig);
sig->oom_adj = current->signal->oom_adj;
sig->oom_score_adj = current->signal->oom_score_adj;
mutex_init(&sig->cred_guard_mutex);
return 0;
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
current->clear_child_tid = tidptr;
return task_pid_vnr(current);
}
static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
* Initialize POSIX timer handling for a single task.
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
tsk->cputime_expires.prof_exp = cputime_zero;
tsk->cputime_expires.virt_exp = cputime_zero;
tsk->cputime_expires.sched_exp = 0;
INIT_LIST_HEAD(&tsk->cpu_timers[0]);
INIT_LIST_HEAD(&tsk->cpu_timers[1]);
INIT_LIST_HEAD(&tsk->cpu_timers[2]);
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *child_tidptr,
struct pid *pid,
int trace)
{
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
*/
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
return ERR_PTR(-EINVAL);
/*
* Shared signal handlers imply shared VM. By way of the above,
* thread groups also imply shared VM. Blocking this case allows
* for various simplifications in other code.
*/
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
return ERR_PTR(-EINVAL);
/*
* Siblings of global init remain as zombies on exit since they are
* not reaped by their parent (swapper). To solve this and to avoid
* multi-rooted process trees, prevent global and container-inits
* from creating siblings.
*/
if ((clone_flags & CLONE_PARENT) &&
current->signal->flags & SIGNAL_UNKILLABLE)
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
if (retval)
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
if (!p)
goto fork_out;
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
retval = -EAGAIN;
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->real_cred->user != INIT_USER)
goto bad_fork_free;
}
retval = copy_creds(p, clone_flags);
if (retval < 0)
goto bad_fork_free;
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
* to stop root fork bombs.
*/
retval = -EAGAIN;
if (nr_threads >= max_threads)
goto bad_fork_cleanup_count;
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
rcu_copy_process(p);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
p->utime = cputime_zero;
p->stime = cputime_zero;
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
p->prev_utime = cputime_zero;
p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif
p->default_timer_slack_ns = current->timer_slack_ns;
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
posix_cpu_timers_init(p);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
p->hardirqs_enabled = 1;
#else
p->hardirqs_enabled = 0;
#endif
p->hardirq_enable_ip = 0;
p->hardirq_enable_event = 0;
p->hardirq_disable_ip = _THIS_IP_;
p->hardirq_disable_event = 0;
p->softirqs_enabled = 1;
p->softirq_enable_ip = _THIS_IP_;
p->softirq_enable_event = 0;
p->softirq_disable_ip = 0;
p->softirq_disable_event = 0;
p->hardirq_context = 0;
p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
p->lockdep_recursion = 0;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
retval = perf_event_init_task(p);
if (retval)
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
if ((retval = copy_semundo(clone_flags, p)))
goto bad_fork_cleanup_audit;
if ((retval = copy_files(clone_flags, p)))
goto bad_fork_cleanup_semundo;
if ((retval = copy_fs(clone_flags, p)))
goto bad_fork_cleanup_files;
if ((retval = copy_sighand(clone_flags, p)))
goto bad_fork_cleanup_fs;
if ((retval = copy_signal(clone_flags, p)))
goto bad_fork_cleanup_sighand;
if ((retval = copy_mm(clone_flags, p)))
goto bad_fork_cleanup_signal;
if ((retval = copy_namespaces(clone_flags, p)))
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
if (pid != &init_struct_pid) {
retval = -ENOMEM;
pid = alloc_pid(p->nsproxy->pid_ns);
if (!pid)
goto bad_fork_cleanup_io;
if (clone_flags & CLONE_NEWPID) {
retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
if (retval < 0)
goto bad_fork_free_pid;
}
}
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
goto bad_fork_free_pid;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
p->sas_ss_sp = p->sas_ss_size = 0;
/*
* Syscall tracing and stepping should be turned off in the
* child regardless of CLONE_PTRACE.
*/
user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
clear_all_latency_tracing(p);
/* ok, now we should be set up.. */
p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
p->pdeath_signal = 0;
p->exit_state = 0;
/*
* Ok, make it visible to the rest of the system.
* We dont wake it up yet.
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
* on the tasklist. */
cgroup_fork_callbacks(p);
cgroup_callbacks_done = 1;
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
p->parent_exec_id = current->parent_exec_id;
} else {
p->real_parent = current;
p->parent_exec_id = current->self_exec_id;
}
spin_lock(&current->sighand->siglock);
/*
* Process group and session signals need to be delivered to just the
* parent before the fork or both the parent and the child after the
* fork. Restart if a signal comes in before we add the new process to
* it's process group.
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
atomic_inc(&current->signal->sigcnt);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
if (likely(p->pid)) {
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
p->signal->leader_pid = pid;
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, pid);
nr_threads++;
}
total_forks++;
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
perf_event_fork(p);
return p;
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_io:
if (p->io_context)
exit_io_context(p);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm) {
task_lock(p);
if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
atomic_dec(&p->mm->oom_disable_count);
task_unlock(p);
mmput(p->mm);
}
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
exit_sem(p);
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
perf_event_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
exit_creds(p);
bad_fork_free:
free_task(p);
fork_out:
return ERR_PTR(retval);
}
noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
}
static inline void init_idle_pids(struct pid_link *links)
{
enum pid_type type;
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
INIT_HLIST_NODE(&links[type].node); /* not really needed */
links[type].pid = &init_struct_pid;
}
}
struct task_struct * __cpuinit fork_idle(int cpu)
{
struct task_struct *task;
struct pt_regs regs;
task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
&init_struct_pid, 0);
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
init_idle(task, cpu);
}
return task;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
int __user *parent_tidptr,
int __user *child_tidptr)
{
struct task_struct *p;
int trace = 0;
long nr;
/*
* Do some preliminary argument and permissions checking before we
* actually start allocating stuff
*/
if (clone_flags & CLONE_NEWUSER) {
if (clone_flags & CLONE_THREAD)
return -EINVAL;
/* hopefully this check will go away when userns support is
* complete
*/
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
!capable(CAP_SETGID))
return -EPERM;
}
/*
* We hope to recycle these flags after 2.6.26
*/
if (unlikely(clone_flags & CLONE_STOPPED)) {
static int __read_mostly count = 100;
if (count > 0 && printk_ratelimit()) {
char comm[TASK_COMM_LEN];
count--;
printk(KERN_INFO "fork(): process `%s' used deprecated "
"clone flags 0x%lx\n",
get_task_comm(comm, current),
clone_flags & CLONE_STOPPED);
}
}
/*
* When called from kernel_thread, don't do user tracing stuff.
*/
if (likely(user_mode(regs)))
trace = tracehook_prepare_clone(clone_flags);
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
*/
if (!IS_ERR(p)) {
struct completion vfork;
trace_sched_process_fork(current, p);
nr = task_pid_vnr(p);
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
}
audit_finish_fork(p);
tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
* hasn't gotten to tracehook_report_clone() yet. Now we
* clear it and set the child going.
*/
p->flags &= ~PF_STARTING;
if (unlikely(clone_flags & CLONE_STOPPED)) {
/*
* We'll start up with an immediate SIGSTOP.
*/
sigaddset(&p->pending.signal, SIGSTOP);
set_tsk_thread_flag(p, TIF_SIGPENDING);
__set_task_state(p, TASK_STOPPED);
} else {
wake_up_new_task(p, clone_flags);
}
tracehook_report_clone_complete(trace, regs,
clone_flags, nr, p);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
tracehook_report_vfork_done(p, nr);
}
} else {
nr = PTR_ERR(p);
}
return nr;
}
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
static void sighand_ctor(void *data)
{
struct sighand_struct *sighand = data;
spin_lock_init(&sighand->siglock);
init_waitqueue_head(&sighand->signalfd_wqh);
}
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
/*
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
static void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
* unshare vm.
*/
if (*flags_ptr & CLONE_THREAD)
*flags_ptr |= CLONE_VM;
/*
* If unsharing vm, must also unshare signal handlers.
*/
if (*flags_ptr & CLONE_VM)
*flags_ptr |= CLONE_SIGHAND;
/*
* If unsharing namespace, must also unshare filesystem information.
*/
if (*flags_ptr & CLONE_NEWNS)
*flags_ptr |= CLONE_FS;
}
/*
* Unsharing of tasks created with CLONE_THREAD is not supported yet
*/
static int unshare_thread(unsigned long unshare_flags)
{
if (unshare_flags & CLONE_THREAD)
return -EINVAL;
return 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
if (!(unshare_flags & CLONE_FS) || !fs)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
if (fs->users == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
if (!*new_fsp)
return -ENOMEM;
return 0;
}
/*
* Unsharing of sighand is not supported yet
*/
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
struct sighand_struct *sigh = current->sighand;
if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
return -EINVAL;
else
return 0;
}
/*
* Unshare vm if it is being shared
*/
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
struct mm_struct *mm = current->mm;
if ((unshare_flags & CLONE_VM) &&
(mm && atomic_read(&mm->mm_users) > 1)) {
return -EINVAL;
}
return 0;
}
/*
* Unshare file descriptor table if it is being shared
*/
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
struct files_struct *fd = current->files;
int error = 0;
if ((unshare_flags & CLONE_FILES) &&
(fd && atomic_read(&fd->count) > 1)) {
*new_fdp = dup_fd(fd, &error);
if (!*new_fdp)
return error;
}
return 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
int err = 0;
struct fs_struct *fs, *new_fs = NULL;
struct sighand_struct *new_sigh = NULL;
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct nsproxy *new_nsproxy = NULL;
int do_sysvsem = 0;
check_unshare_flags(&unshare_flags);
/* Return -EINVAL for all unsupported flags */
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
goto bad_unshare_out;
/*
* CLONE_NEWIPC must also detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
* namespace are unreachable.
*/
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
do_sysvsem = 1;
if ((err = unshare_thread(unshare_flags)))
goto bad_unshare_out;
if ((err = unshare_fs(unshare_flags, &new_fs)))
goto bad_unshare_cleanup_thread;
if ((err = unshare_sighand(unshare_flags, &new_sigh)))
goto bad_unshare_cleanup_fs;
if ((err = unshare_vm(unshare_flags, &new_mm)))
goto bad_unshare_cleanup_sigh;
if ((err = unshare_fd(unshare_flags, &new_fd)))
goto bad_unshare_cleanup_vm;
if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
new_fs)))
goto bad_unshare_cleanup_fd;
if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
if (do_sysvsem) {
/*
* CLONE_SYSVSEM is equivalent to sys_exit().
*/
exit_sem(current);
}
if (new_nsproxy) {
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
if (new_fs) {
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
if (--fs->users)
new_fs = NULL;
else
new_fs = fs;
spin_unlock(&fs->lock);
}
if (new_mm) {
mm = current->mm;
active_mm = current->active_mm;
current->mm = new_mm;
current->active_mm = new_mm;
if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
atomic_dec(&mm->oom_disable_count);
atomic_inc(&new_mm->oom_disable_count);
}
activate_mm(active_mm, new_mm);
new_mm = mm;
}
if (new_fd) {
fd = current->files;
current->files = new_fd;
new_fd = fd;
}
task_unlock(current);
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_fd:
if (new_fd)
put_files_struct(new_fd);
bad_unshare_cleanup_vm:
if (new_mm)
mmput(new_mm);
bad_unshare_cleanup_sigh:
if (new_sigh)
if (atomic_dec_and_test(&new_sigh->count))
kmem_cache_free(sighand_cachep, new_sigh);
bad_unshare_cleanup_fs:
if (new_fs)
free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
return err;
}
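/*
 * Illustrative userspace sketch (an assumption for context, not part of this
 * file): the comment above sys_unshare describes undoing sharing that was
 * set up by clone(). Calling the glibc unshare() wrapper with CLONE_FILES,
 * for example, ends up in unshare_fd() above and gives the caller a private
 * file descriptor table without requiring any privilege.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	if (unshare(CLONE_FILES) == -1) {
		perror("unshare(CLONE_FILES)");
		return EXIT_FAILURE;
	}
	/* descriptors opened from here on are no longer shared with any clone peers */
	printf("private file descriptor table installed\n");
	return EXIT_SUCCESS;
}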
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
int unshare_files(struct files_struct **displaced)
{
struct task_struct *task = current;
struct files_struct *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, &copy);
if (error || !copy) {
*displaced = NULL;
return error;
}
*displaced = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
return 0;
}
|
13,143,650,092,801 | /*
BNEP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2001-2002 Inventel Systemes
Written 2001-2002 by
David Libault <david.libault@inventel.fr>
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "bnep.h"
static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
BT_DBG("sock %p sk %p", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
nsock = sockfd_lookup(ca.sock, &err);
if (!nsock)
return err;
if (nsock->sk->sk_state != BT_CONNECTED) {
sockfd_put(nsock);
return -EBADFD;
}
err = bnep_add_connection(&ca, nsock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else
sockfd_put(nsock);
return err;
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return bnep_del_connection(&cd);
case BNEPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
return -EFAULT;
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && copy_to_user(argp, &cl, sizeof(cl)))
return -EFAULT;
return err;
case BNEPGETCONNINFO:
if (copy_from_user(&ci, argp, sizeof(ci)))
return -EFAULT;
err = bnep_get_conninfo(&ci);
if (!err && copy_to_user(argp, &ci, sizeof(ci)))
return -EFAULT;
return err;
default:
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_COMPAT
static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
uint32_t uci;
int err;
if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
cl.ci = compat_ptr(uci);
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
}
return bnep_sock_ioctl(sock, cmd, arg);
}
#endif
static const struct proto_ops bnep_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = bnep_sock_release,
.ioctl = bnep_sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = bnep_sock_compat_ioctl,
#endif
.bind = sock_no_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.poll = sock_no_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static struct proto bnep_proto = {
.name = "BNEP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct bt_sock)
};
static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &bnep_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
return 0;
}
static const struct net_proto_family bnep_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = bnep_sock_create
};
int __init bnep_sock_init(void)
{
int err;
err = proto_register(&bnep_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
if (err < 0)
goto error;
return 0;
error:
BT_ERR("Can't register BNEP socket");
proto_unregister(&bnep_proto);
return err;
}
void __exit bnep_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_BNEP) < 0)
BT_ERR("Can't unregister BNEP socket");
proto_unregister(&bnep_proto);
}
| /*
BNEP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2001-2002 Inventel Systemes
Written 2001-2002 by
David Libault <david.libault@inventel.fr>
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "bnep.h"
static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
BT_DBG("sock %p sk %p", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
void __user *argp = (void __user *)arg;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case BNEPCONNADD:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
nsock = sockfd_lookup(ca.sock, &err);
if (!nsock)
return err;
if (nsock->sk->sk_state != BT_CONNECTED) {
sockfd_put(nsock);
return -EBADFD;
}
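/* make sure the interface name copied from userspace is NUL-terminated */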
ca.device[sizeof(ca.device)-1] = 0;
err = bnep_add_connection(&ca, nsock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else
sockfd_put(nsock);
return err;
case BNEPCONNDEL:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return bnep_del_connection(&cd);
case BNEPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
return -EFAULT;
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && copy_to_user(argp, &cl, sizeof(cl)))
return -EFAULT;
return err;
case BNEPGETCONNINFO:
if (copy_from_user(&ci, argp, sizeof(ci)))
return -EFAULT;
err = bnep_get_conninfo(&ci);
if (!err && copy_to_user(argp, &ci, sizeof(ci)))
return -EFAULT;
return err;
default:
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_COMPAT
static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
uint32_t uci;
int err;
if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
cl.ci = compat_ptr(uci);
if (cl.cnum <= 0)
return -EINVAL;
err = bnep_get_connlist(&cl);
if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
}
return bnep_sock_ioctl(sock, cmd, arg);
}
#endif
static const struct proto_ops bnep_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = bnep_sock_release,
.ioctl = bnep_sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = bnep_sock_compat_ioctl,
#endif
.bind = sock_no_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.poll = sock_no_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static struct proto bnep_proto = {
.name = "BNEP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct bt_sock)
};
static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &bnep_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
return 0;
}
static const struct net_proto_family bnep_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = bnep_sock_create
};
int __init bnep_sock_init(void)
{
int err;
err = proto_register(&bnep_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops);
if (err < 0)
goto error;
return 0;
error:
BT_ERR("Can't register BNEP socket");
proto_unregister(&bnep_proto);
return err;
}
void __exit bnep_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_BNEP) < 0)
BT_ERR("Can't unregister BNEP socket");
proto_unregister(&bnep_proto);
}
|
12,519,783,640,276 | /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
#define VERSION "0.6"
static int disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static int sco_conn_del(struct hci_conn *conn, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ---- SCO timers ---- */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn || status)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
conn->src = &hdev->bdaddr;
conn->dst = &hcon->dst;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
static inline struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
return 0;
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
err = -ENOMEM;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
goto done;
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
goto done;
}
/* Update source addr of the socket */
bacpy(src, conn->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return count;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return node ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent)
sk->sk_type = parent->sk_type;
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
write_lock_bh(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address (SCO has no PSM) */
bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
if (!parent)
goto done;
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
}
sco_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
done:
sco_conn_unlock(conn);
}
/* ----- SCO interface with lower layer (HCI) ----- */
static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
register struct sock *sk;
struct hlist_node *node;
int lm = 0;
if (type != SCO_LINK && type != ESCO_LINK)
return -EINVAL;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_err(status));
return 0;
}
static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
sco_conn_del(hcon, bt_err(reason));
return 0;
}
static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock_bh(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
static struct hci_proto sco_hci_proto = {
.name = "SCO",
.id = HCI_PROTO_SCO,
.connect_ind = sco_connect_ind,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
.recv_scodata = sco_recv_scodata
};
static int __init sco_init(void)
{
int err;
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = hci_register_proto(&sco_hci_proto);
if (err < 0) {
BT_ERR("SCO protocol registration failed");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
static void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
if (hci_unregister_proto(&sco_hci_proto) < 0)
BT_ERR("SCO protocol unregistration failed");
proto_unregister(&sco_proto);
}
module_init(sco_init);
module_exit(sco_exit);
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-2");
| /*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth SCO sockets. */
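/* A minimal userspace sketch of how a client drives this layer; this is
 * not part of the kernel source, the str2ba() helper comes from the BlueZ
 * user library, the address is made up and error handling is omitted:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
 *	struct sockaddr_sco addr = { 0 };
 *
 *	addr.sco_family = AF_BLUETOOTH;
 *	str2ba("00:11:22:33:44:55", &addr.sco_bdaddr);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	write(fd, frame, frame_len);	(frame_len bounded by the SCO MTU)
 *
 * The connect() lands in sco_sock_connect() and the write() in
 * sco_sock_sendmsg() below. */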
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
#define VERSION "0.6"
static int disable_esco;
static const struct proto_ops sco_sock_ops;
static struct bt_sock_list sco_sk_list = {
.lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock)
};
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static int sco_conn_del(struct hci_conn *conn, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ---- SCO timers ---- */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
/* ---- SCO connections ---- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn = hcon->sco_data;
if (conn || status)
return conn;
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
if (!conn)
return NULL;
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
conn->src = &hdev->bdaddr;
conn->dst = &hcon->dst;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}
static inline struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
sk = sco_chan_get(conn);
if (sk) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
return 0;
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk)
err = -EBUSY;
else
__sco_chan_add(conn, sk, parent);
sco_conn_unlock(conn);
return err;
}
static int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err, type;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
hdev = hci_get_route(dst, src);
if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
err = -ENOMEM;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
goto done;
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
goto done;
}
/* Update source addr of the socket */
bacpy(src, conn->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sk_sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_send_alloc(sk, count,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
kfree_skb(skb);
return -EFAULT;
}
hci_send_sco(conn->hcon, skb);
return count;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
struct hlist_node *node;
sk_for_each(sk, node, &sco_sk_list.head)
if (!bacmp(&bt_sk(sk)->src, ba))
goto found;
sk = NULL;
found:
return sk;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return node ? sk : sk1;
}
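/* For example, with one listener bound to 00:11:22:33:44:55 (a made-up
 * adapter address) and another bound to BDADDR_ANY, a connection arriving
 * on that adapter is handed to the exact match, while connections on any
 * other local adapter fall back to the BDADDR_ANY listener. */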
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
sco_sock_close(sk);
sco_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __sco_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Must be called on unlocked socket. */
static void sco_sock_close(struct sock *sk)
{
sco_sock_clear_timer(sk);
lock_sock(sk);
__sco_sock_close(sk);
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent)
sk->sk_type = parent->sk_type;
}
static struct proto sco_proto = {
.name = "SCO",
.owner = THIS_MODULE,
.obj_size = sizeof(struct sco_pinfo)
};
static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
return sk;
}
static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
write_lock_bh(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
sk->sk_state = BT_BOUND;
}
write_unlock_bh(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
return -EBADFD;
if (sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address (SCO has no PSM) */
bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
err = sco_connect(sk);
if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (!(ch = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
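/* Server-side counterpart from userspace, a sketch exercising
 * sco_sock_bind(), sco_sock_listen() and sco_sock_accept() above
 * (errors unchecked):
 *
 *	int srv = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
 *	struct sockaddr_sco la = { 0 };
 *
 *	la.sco_family = AF_BLUETOOTH;
 *	bacpy(&la.sco_bdaddr, BDADDR_ANY);	(listen on any adapter)
 *	bind(srv, (struct sockaddr *) &la, sizeof(la));
 *	listen(srv, 5);
 *	cli = accept(srv, NULL, NULL);		(blocks in sco_sock_accept)
 */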
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
err = sock_error(sk);
if (err)
return err;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->sk_state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_DBG("mtu %d", opts.mtu);
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
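/* Zero the struct first: sco_conninfo may contain padding, and
 * copying uninitialized stack bytes to userspace below would leak
 * kernel memory. */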
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
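/* From userspace, the connection MTU exported by SCO_OPTIONS above can be
 * read like this (a sketch; fd is assumed to be a connected SCO socket):
 *
 *	struct sco_options opts;
 *	socklen_t optlen = sizeof(opts);
 *
 *	getsockopt(fd, SOL_SCO, SCO_OPTIONS, &opts, &optlen);
 *
 * Outgoing frames longer than opts.mtu are rejected by sco_send_frame()
 * with -EINVAL. */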
static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int sco_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
sco_sock_clear_timer(sk);
__sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sco_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
lock_sock(sk);
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
release_sock(sk);
}
sock_orphan(sk);
sco_sock_kill(sk);
return err;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bt_accept_enqueue(parent, sk);
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_ZAPPED);
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent;
struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
if (!parent)
goto done;
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
}
sco_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
parent->sk_data_ready(parent, 1);
bh_unlock_sock(parent);
}
done:
sco_conn_unlock(conn);
}
/* ----- SCO interface with lower layer (HCI) ----- */
static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
register struct sock *sk;
struct hlist_node *node;
int lm = 0;
if (type != SCO_LINK && type != ESCO_LINK)
return -EINVAL;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets */
read_lock(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
if (sk->sk_state != BT_LISTEN)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
}
read_unlock(&sco_sk_list.lock);
return lm;
}
static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bt_err(status));
return 0;
}
static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return -EINVAL;
sco_conn_del(hcon, bt_err(reason));
return 0;
}
static int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
static int sco_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
struct hlist_node *node;
read_lock_bh(&sco_sk_list.lock);
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock_bh(&sco_sk_list.lock);
return 0;
}
static int sco_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sco_debugfs_show, inode->i_private);
}
static const struct file_operations sco_debugfs_fops = {
.open = sco_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
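/* With debugfs mounted (typically at /sys/kernel/debug) this file shows
 * up as bluetooth/sco and prints one "src dst state" line per socket. */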
static struct dentry *sco_debugfs;
static const struct proto_ops sco_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = sco_sock_release,
.bind = sco_sock_bind,
.connect = sco_sock_connect,
.listen = sco_sock_listen,
.accept = sco_sock_accept,
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = bt_sock_recvmsg,
.poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
static const struct net_proto_family sco_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = sco_sock_create,
};
static struct hci_proto sco_hci_proto = {
.name = "SCO",
.id = HCI_PROTO_SCO,
.connect_ind = sco_connect_ind,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
.recv_scodata = sco_recv_scodata
};
static int __init sco_init(void)
{
int err;
err = proto_register(&sco_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops);
if (err < 0) {
BT_ERR("SCO socket registration failed");
goto error;
}
err = hci_register_proto(&sco_hci_proto);
if (err < 0) {
BT_ERR("SCO protocol registration failed");
bt_sock_unregister(BTPROTO_SCO);
goto error;
}
if (bt_debugfs) {
sco_debugfs = debugfs_create_file("sco", 0444,
bt_debugfs, NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
BT_INFO("SCO (Voice Link) ver %s", VERSION);
BT_INFO("SCO socket layer initialized");
return 0;
error:
proto_unregister(&sco_proto);
return err;
}
static void __exit sco_exit(void)
{
debugfs_remove(sco_debugfs);
if (bt_sock_unregister(BTPROTO_SCO) < 0)
BT_ERR("SCO socket unregistration failed");
if (hci_unregister_proto(&sco_hci_proto) < 0)
BT_ERR("SCO protocol unregistration failed");
proto_unregister(&sco_proto);
}
module_init(sco_init);
module_exit(sco_exit);
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
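/* For example, when SCO is built as a module, eSCO setup can be disabled
 * at load time with "modprobe sco disable_esco=1"; thanks to the 0644
 * permission above it can also be flipped at runtime through
 * /sys/module/sco/parameters/disable_esco. */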
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-2");
|
60,099,808,988,429 | /*********************************************************************
*
* Filename: iriap.c
* Version: 0.8
* Description: Information Access Protocol (IAP)
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
* Modified at: Sat Dec 25 16:42:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
#include <net/irda/irlmp.h>
#include <net/irda/irias_object.h>
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
static const char *const ias_charset_types[] = {
"CS_ASCII",
"CS_ISO_8859_1",
"CS_ISO_8859_2",
"CS_ISO_8859_3",
"CS_ISO_8859_4",
"CS_ISO_8859_5",
"CS_ISO_8859_6",
"CS_ISO_8859_7",
"CS_ISO_8859_8",
"CS_ISO_8859_9",
"CS_UNICODE"
};
#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static void *service_handle;
static void __iriap_close(struct iriap_cb *self);
static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode);
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *skb);
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 max_header_size,
struct sk_buff *skb);
static void iriap_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size, __u8 max_header_size,
struct sk_buff *skb);
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb);
static void iriap_watchdog_timer_expired(void *data);
static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
int timeout)
{
irda_start_timer(&self->watchdog_timer, timeout, self,
iriap_watchdog_timer_expired);
}
/*
* Function iriap_init (void)
*
* Initializes the IrIAP layer, called by the module initialization code
* in irmod.c
*/
int __init iriap_init(void)
{
struct ias_object *obj;
struct iriap_cb *server;
__u8 oct_seq[6];
__u16 hints;
/* Allocate master array */
iriap = hashbin_new(HB_LOCK);
if (!iriap)
return -ENOMEM;
/* Object repository - defined in irias_object.c */
irias_objects = hashbin_new(HB_LOCK);
if (!irias_objects) {
IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n",
__func__);
hashbin_delete(iriap, NULL);
return -ENOMEM;
}
/*
* Register some default services for IrLMP
*/
hints = irlmp_service_to_hint(S_COMPUTER);
service_handle = irlmp_register_service(hints);
/* Register the Device object with LM-IAS */
obj = irias_new_object("Device", IAS_DEVICE_ID);
irias_add_string_attrib(obj, "DeviceName", "Linux", IAS_KERNEL_ATTR);
oct_seq[0] = 0x01; /* Version 1 */
oct_seq[1] = 0x00; /* IAS support bits */
oct_seq[2] = 0x00; /* LM-MUX support bits */
#ifdef CONFIG_IRDA_ULTRA
oct_seq[2] |= 0x04; /* Connectionless Data support */
#endif
irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3,
IAS_KERNEL_ATTR);
irias_insert_object(obj);
/*
* Register server support with IrLMP so we can accept incoming
* connections
*/
server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!server) {
IRDA_DEBUG(0, "%s(), unable to open server\n", __func__);
return -1;
}
iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
return 0;
}
/*
* Function iriap_cleanup (void)
*
* Cleans up the IrIAP layer, called by the module cleanup code in
* irmod.c
*/
void iriap_cleanup(void)
{
irlmp_unregister_service(service_handle);
hashbin_delete(iriap, (FREE_FUNC) __iriap_close);
hashbin_delete(irias_objects, (FREE_FUNC) __irias_delete_object);
}
/*
* Function iriap_open (void)
*
* Opens an instance of the IrIAP layer, and registers with IrLMP
*/
struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
CONFIRM_CALLBACK callback)
{
struct iriap_cb *self;
IRDA_DEBUG(2, "%s()\n", __func__);
self = kzalloc(sizeof(*self), GFP_ATOMIC);
if (!self) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
return NULL;
}
/*
* Initialize instance
*/
self->magic = IAS_MAGIC;
self->mode = mode;
if (mode == IAS_CLIENT)
iriap_register_lsap(self, slsap_sel, mode);
self->confirm = callback;
self->priv = priv;
/* iriap_getvaluebyclass_request() will construct packets before
* we connect, so this must have a sane value... Jean II */
self->max_header_size = LMP_MAX_HEADER;
init_timer(&self->watchdog_timer);
hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
/* Initialize state machines */
iriap_next_client_state(self, S_DISCONNECT);
iriap_next_call_state(self, S_MAKE_CALL);
iriap_next_server_state(self, R_DISCONNECT);
iriap_next_r_connect_state(self, R_WAITING);
return self;
}
EXPORT_SYMBOL(iriap_open);
/*
* Function __iriap_close (self)
*
* Removes (deallocates) the IrIAP instance
*
*/
static void __iriap_close(struct iriap_cb *self)
{
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
del_timer(&self->watchdog_timer);
if (self->request_skb)
dev_kfree_skb(self->request_skb);
self->magic = 0;
kfree(self);
}
/*
* Function iriap_close (void)
*
* Closes IrIAP and deregisters with IrLMP
*/
void iriap_close(struct iriap_cb *self)
{
struct iriap_cb *entry;
IRDA_DEBUG(2, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
if (self->lsap) {
irlmp_close_lsap(self->lsap);
self->lsap = NULL;
}
entry = (struct iriap_cb *) hashbin_remove(iriap, (long) self, NULL);
IRDA_ASSERT(entry == self, return;);
__iriap_close(self);
}
EXPORT_SYMBOL(iriap_close);
static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
{
notify_t notify;
IRDA_DEBUG(2, "%s()\n", __func__);
irda_notify_init(&notify);
notify.connect_confirm = iriap_connect_confirm;
notify.connect_indication = iriap_connect_indication;
notify.disconnect_indication = iriap_disconnect_indication;
notify.data_indication = iriap_data_indication;
notify.instance = self;
if (mode == IAS_CLIENT)
strcpy(notify.name, "IrIAS cli");
else
strcpy(notify.name, "IrIAS srv");
self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
if (self->lsap == NULL) {
IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__);
return -1;
}
self->slsap_sel = self->lsap->slsap_sel;
return 0;
}
/*
* Function iriap_disconnect_indication (handle, reason)
*
* Got disconnect, so clean up everything associated with this connection
*
*/
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *skb)
{
struct iriap_cb *self;
IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(iriap != NULL, return;);
del_timer(&self->watchdog_timer);
/* Not needed */
if (skb)
dev_kfree_skb(skb);
if (self->mode == IAS_CLIENT) {
IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__);
iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
/*
* Inform service user that the request failed by sending
* it a NULL value. Warning, the client might close us, so
* remember not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
} else {
IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__);
iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
iriap_close(self);
}
}
/*
* Function iriap_disconnect_request (handle)
*/
static void iriap_disconnect_request(struct iriap_cb *self)
{
struct sk_buff *tx_skb;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (tx_skb == NULL) {
IRDA_DEBUG(0,
"%s(), Could not allocate an sk_buff of length %d\n",
__func__, LMP_MAX_HEADER);
return;
}
/*
* Reserve space for MUX control and LAP header
*/
skb_reserve(tx_skb, LMP_MAX_HEADER);
irlmp_disconnect_request(self->lsap, tx_skb);
}
/*
* Function iriap_getvaluebyclass (addr, name, attr)
*
* Retrieve the values of the given attribute from all objects with the
* given class name
*/
int iriap_getvaluebyclass_request(struct iriap_cb *self,
__u32 saddr, __u32 daddr,
char *name, char *attr)
{
struct sk_buff *tx_skb;
int name_len, attr_len, skb_len;
__u8 *frame;
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return -1;);
/* Client must supply the destination device address */
if (!daddr)
return -1;
self->daddr = daddr;
self->saddr = saddr;
/*
* Save operation, so we know what the later indication is about
*/
self->operation = GET_VALUE_BY_CLASS;
/* Give ourselves 10 secs to finish this operation */
iriap_start_watchdog_timer(self, 10*HZ);
name_len = strlen(name); /* Up to IAS_MAX_CLASSNAME = 60 */
attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
skb_len = self->max_header_size+2+name_len+1+attr_len+4;
tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 3+name_len+attr_len);
frame = tx_skb->data;
/* Build frame */
frame[0] = IAP_LST | GET_VALUE_BY_CLASS;
frame[1] = name_len; /* Insert length of name */
memcpy(frame+2, name, name_len); /* Insert name */
frame[2+name_len] = attr_len; /* Insert length of attr */
memcpy(frame+3+name_len, attr, attr_len); /* Insert attr */
iriap_do_client_event(self, IAP_CALL_REQUEST_GVBC, tx_skb);
/* Drop reference count - see state_s_disconnect(). */
dev_kfree_skb(tx_skb);
return 0;
}
EXPORT_SYMBOL(iriap_getvaluebyclass_request);
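/* The request frame built above is laid out as follows (sizes in bytes):
 *
 *	[ IAP_LST | GET_VALUE_BY_CLASS : 1 ][ name_len : 1 ][ name ]
 *	[ attr_len : 1 ][ attr ]
 *
 * A typical in-kernel caller looks roughly like this (a sketch; the
 * callback name is hypothetical):
 *
 *	self = iriap_open(LSAP_ANY, IAS_CLIENT, priv, my_confirm_cb);
 *	if (self)
 *		iriap_getvaluebyclass_request(self, saddr, daddr,
 *					      "Device", "DeviceName");
 *
 * The result is delivered asynchronously through the confirm callback,
 * see iriap_getvaluebyclass_confirm() below. */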
/*
* Function iriap_getvaluebyclass_confirm (self, skb)
*
* Got result from GetValueByClass command. Parse it and return result
* to service user.
*
*/
static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_value *value;
int charset;
__u32 value_len;
__u32 tmp_cpu32;
__u16 obj_id;
__u16 len;
__u8 type;
__u8 *fp;
int n;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
/* Initialize variables */
fp = skb->data;
n = 2;
/* Get length, MSB first */
len = get_unaligned_be16(fp + n);
n += 2;
IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len);
/* Get object ID, MSB first */
obj_id = get_unaligned_be16(fp + n);
n += 2;
type = fp[n++];
IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type);
switch (type) {
case IAS_INTEGER:
memcpy(&tmp_cpu32, fp+n, 4); n += 4;
be32_to_cpus(&tmp_cpu32);
value = irias_new_integer_value(tmp_cpu32);
/* Legal values restricted to 0x01-0x6f, page 15 irttp */
IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer);
break;
case IAS_STRING:
charset = fp[n++];
switch (charset) {
case CS_ASCII:
break;
/* case CS_ISO_8859_1: */
/* case CS_ISO_8859_2: */
/* case CS_ISO_8859_3: */
/* case CS_ISO_8859_4: */
/* case CS_ISO_8859_5: */
/* case CS_ISO_8859_6: */
/* case CS_ISO_8859_7: */
/* case CS_ISO_8859_8: */
/* case CS_ISO_8859_9: */
/* case CS_UNICODE: */
default:
IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
__func__, ias_charset_types[charset]);
/* Aborting, close connection! */
iriap_disconnect_request(self);
return;
/* break; */
}
value_len = fp[n++];
IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
/* Make sure the string is null-terminated */
if (n + value_len < skb->len)
fp[n + value_len] = 0x00;
IRDA_DEBUG(4, "Got string %s\n", fp+n);
/* Will truncate to IAS_MAX_STRING bytes */
value = irias_new_string_value(fp+n);
break;
case IAS_OCT_SEQ:
value_len = get_unaligned_be16(fp + n);
n += 2;
/* Will truncate to IAS_MAX_OCTET_STRING bytes */
value = irias_new_octseq_value(fp+n, value_len);
break;
default:
value = irias_new_missing_value();
break;
}
/* Finished, close connection! */
iriap_disconnect_request(self);
/* Warning, the client might close us, so remember not to use self
* anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
else {
IRDA_DEBUG(0, "%s(), missing handler!\n", __func__);
irias_delete_value(value);
}
}
/*
* Function iriap_getvaluebyclass_response ()
*
* Send answer back to remote LM-IAS
*
*/
static void iriap_getvaluebyclass_response(struct iriap_cb *self,
__u16 obj_id,
__u8 ret_code,
struct ias_value *value)
{
struct sk_buff *tx_skb;
int n;
__be32 tmp_be32;
__be16 tmp_be16;
__u8 *fp;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(value != NULL, return;);
IRDA_ASSERT(value->len <= 1024, return;);
/* Initialize variables */
n = 0;
/*
* The size of the response must be adjusted to the length of the
* value. The 32 extra bytes of headroom comfortably cover the 6-byte
* frame header plus at most 5 bytes of value coding.
*/
tx_skb = alloc_skb(value->len + self->max_header_size + 32,
GFP_ATOMIC);
if (!tx_skb)
return;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 6);
fp = tx_skb->data;
/* Build frame */
fp[n++] = GET_VALUE_BY_CLASS | IAP_LST;
fp[n++] = ret_code;
/* Insert list length (MSB first) */
tmp_be16 = htons(0x0001);
memcpy(fp+n, &tmp_be16, 2); n += 2;
/* Insert object identifier (MSB first) */
tmp_be16 = cpu_to_be16(obj_id);
memcpy(fp+n, &tmp_be16, 2); n += 2;
switch (value->type) {
case IAS_STRING:
skb_put(tx_skb, 3 + value->len);
fp[n++] = value->type;
fp[n++] = 0; /* ASCII */
fp[n++] = (__u8) value->len;
memcpy(fp+n, value->t.string, value->len); n+=value->len;
break;
case IAS_INTEGER:
skb_put(tx_skb, 5);
fp[n++] = value->type;
tmp_be32 = cpu_to_be32(value->t.integer);
memcpy(fp+n, &tmp_be32, 4); n += 4;
break;
case IAS_OCT_SEQ:
skb_put(tx_skb, 3 + value->len);
fp[n++] = value->type;
tmp_be16 = cpu_to_be16(value->len);
memcpy(fp+n, &tmp_be16, 2); n += 2;
memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
break;
case IAS_MISSING:
IRDA_DEBUG(3, "%s: sending IAS_MISSING\n", __func__);
skb_put(tx_skb, 1);
fp[n++] = value->type;
break;
default:
IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__);
break;
}
iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb);
/* Drop reference count - see state_r_execute(). */
dev_kfree_skb(tx_skb);
}
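/* For reference, the response frame assembled above has the layout:
 *
 *	[ GET_VALUE_BY_CLASS | IAP_LST : 1 ][ ret_code : 1 ]
 *	[ list_len : 2, MSB first ][ obj_id : 2, MSB first ][ value... ]
 *
 * where the value part is [type][charset][len][bytes] for strings,
 * [type][be32] for integers and [type][len:2][bytes] for octet
 * sequences, matching the switch above. */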
/*
* Function iriap_getvaluebyclass_indication (self, skb)
*
* getvaluebyclass is requested from peer LM-IAS
*
*/
static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_object *obj;
struct ias_attrib *attrib;
int name_len;
int attr_len;
char name[IAS_MAX_CLASSNAME + 1]; /* 60 bytes */
char attr[IAS_MAX_ATTRIBNAME + 1]; /* 60 bytes */
__u8 *fp;
int n;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
n = 1;
name_len = fp[n++];
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&irias_missing);
return;
}
IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN,
&irias_missing);
return;
}
/* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
}
/*
* Function iriap_send_ack (void)
*
* Currently not used
*
*/
void iriap_send_ack(struct iriap_cb *self)
{
struct sk_buff *tx_skb;
__u8 *frame;
IRDA_DEBUG(2, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
tx_skb = alloc_skb(LMP_MAX_HEADER + 1, GFP_ATOMIC);
if (!tx_skb)
return;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 1);
frame = tx_skb->data;
/* Build frame */
frame[0] = IAP_LST | IAP_ACK | self->operation;
irlmp_data_request(self->lsap, tx_skb);
}
void iriap_connect_request(struct iriap_cb *self)
{
int ret;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
ret = irlmp_connect_request(self->lsap, LSAP_IAS,
self->saddr, self->daddr,
NULL, NULL);
if (ret < 0) {
IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
}
}
/*
* Function iriap_connect_confirm (handle, skb)
*
* LSAP connection confirmed!
*
*/
static void iriap_connect_confirm(void *instance, void *sap,
struct qos_info *qos, __u32 max_seg_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct iriap_cb *self;
self = (struct iriap_cb *) instance;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
self->max_data_size = max_seg_size;
self->max_header_size = max_header_size;
del_timer(&self->watchdog_timer);
iriap_do_client_event(self, IAP_LM_CONNECT_CONFIRM, skb);
/* Drop reference count - see state_s_make_call(). */
dev_kfree_skb(skb);
}
/*
* Function iriap_connect_indication (handle, skb)
*
* Remote LM-IAS is requesting connection
*
*/
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_seg_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct iriap_cb *self, *new;
IRDA_DEBUG(1, "%s()\n", __func__);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self != NULL, goto out;);
IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
/* Start new server */
new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!new) {
IRDA_DEBUG(0, "%s(), open failed\n", __func__);
goto out;
}
/* Now attach up the new "socket" */
new->lsap = irlmp_dup(self->lsap, new);
if (!new->lsap) {
IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
goto out;
}
new->max_data_size = max_seg_size;
new->max_header_size = max_header_size;
/* Clean up the original one to keep it in listen state */
irlmp_listen(self->lsap);
iriap_do_server_event(new, IAP_LM_CONNECT_INDICATION, skb);
out:
/* Drop reference count - see state_r_disconnect(). */
dev_kfree_skb(skb);
}
/*
* Function iriap_data_indication (handle, skb)
*
* Receives data from connection identified by handle from IrLMP
*
*/
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb)
{
struct iriap_cb *self;
__u8 *frame;
__u8 opcode;
IRDA_DEBUG(3, "%s()\n", __func__);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(skb != NULL, return 0;);
IRDA_ASSERT(self != NULL, goto out;);
IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
frame = skb->data;
if (self->mode == IAS_SERVER) {
/* Call server */
IRDA_DEBUG(4, "%s(), Calling server!\n", __func__);
iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb);
goto out;
}
opcode = frame[0];
if (~opcode & IAP_LST) {
IRDA_WARNING("%s: IrIAS multiframe commands or "
"results are not implemented yet!\n",
__func__);
goto out;
}
/* Check for ack frames since they don't contain any data */
if (opcode & IAP_ACK) {
IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__);
goto out;
}
opcode &= ~IAP_LST; /* Mask away LST bit */
switch (opcode) {
case GET_INFO_BASE:
IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n");
break;
case GET_VALUE_BY_CLASS:
iriap_do_call_event(self, IAP_RECV_F_LST, NULL);
switch (frame[1]) {
case IAS_SUCCESS:
iriap_getvaluebyclass_confirm(self, skb);
break;
case IAS_CLASS_UNKNOWN:
IRDA_DEBUG(1, "%s(), No such class!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
/*
* Warning, the client might close us, so remember
* not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
self->priv);
break;
case IAS_ATTRIB_UNKNOWN:
IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
/*
* Warning, the client might close us, so remember
* not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_ATTRIB_UNKNOWN, 0, NULL,
self->priv);
break;
}
break;
default:
IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__,
opcode);
break;
}
out:
/* Cleanup - sub-calls will have done skb_get() as needed. */
dev_kfree_skb(skb);
return 0;
}
/*
* Function iriap_call_indication (self, skb)
*
* Received call to server from peer LM-IAS
*
*/
void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
{
__u8 *fp;
__u8 opcode;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
opcode = fp[0];
if (~opcode & 0x80) {
IRDA_WARNING("%s: IrIAS multiframe commands or results "
"are not implemented yet!\n", __func__);
return;
}
opcode &= 0x7f; /* Mask away LST bit */
switch (opcode) {
case GET_INFO_BASE:
IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n",
__func__);
break;
case GET_VALUE_BY_CLASS:
iriap_getvaluebyclass_indication(self, skb);
break;
}
/* skb will be cleaned up in iriap_data_indication */
}
/*
* Function iriap_watchdog_timer_expired (data)
*
* Query has taken too long, so abort
*
*/
static void iriap_watchdog_timer_expired(void *data)
{
struct iriap_cb *self = (struct iriap_cb *) data;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
/* iriap_close(self); */
}
#ifdef CONFIG_PROC_FS
static const char *const ias_value_types[] = {
"IAS_MISSING",
"IAS_INTEGER",
"IAS_OCT_SEQ",
"IAS_STRING"
};
static inline struct ias_object *irias_seq_idx(loff_t pos)
{
struct ias_object *obj;
for (obj = (struct ias_object *) hashbin_get_first(irias_objects);
obj; obj = (struct ias_object *) hashbin_get_next(irias_objects)) {
if (pos-- == 0)
break;
}
return obj;
}
static void *irias_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_irq(&irias_objects->hb_spinlock);
return *pos ? irias_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *irias_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN)
? (void *) hashbin_get_first(irias_objects)
: (void *) hashbin_get_next(irias_objects);
}
static void irias_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_irq(&irias_objects->hb_spinlock);
}
static int irias_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "LM-IAS Objects:\n");
else {
struct ias_object *obj = v;
struct ias_attrib *attrib;
IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -EINVAL;);
seq_printf(seq, "name: %s, id=%d\n",
obj->name, obj->id);
/* Careful about priority inversions here!
* All other uses of attrib spinlock are independent of
* the object spinlock, so we are safe. Jean II */
spin_lock(&obj->attribs->hb_spinlock);
/* List all attributes for this object */
for (attrib = (struct ias_attrib *) hashbin_get_first(obj->attribs);
attrib != NULL;
attrib = (struct ias_attrib *) hashbin_get_next(obj->attribs)) {
IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC,
goto outloop; );
seq_printf(seq, " - Attribute name: \"%s\", ",
attrib->name);
seq_printf(seq, "value[%s]: ",
ias_value_types[attrib->value->type]);
switch (attrib->value->type) {
case IAS_INTEGER:
seq_printf(seq, "%d\n",
attrib->value->t.integer);
break;
case IAS_STRING:
seq_printf(seq, "\"%s\"\n",
attrib->value->t.string);
break;
case IAS_OCT_SEQ:
seq_printf(seq, "octet sequence (%d bytes)\n",
attrib->value->len);
break;
case IAS_MISSING:
seq_puts(seq, "missing\n");
break;
default:
seq_printf(seq, "type %d?\n",
attrib->value->type);
}
seq_putc(seq, '\n');
}
IRDA_ASSERT_LABEL(outloop:)
spin_unlock(&obj->attribs->hb_spinlock);
}
return 0;
}
static const struct seq_operations irias_seq_ops = {
.start = irias_seq_start,
.next = irias_seq_next,
.stop = irias_seq_stop,
.show = irias_seq_show,
};
static int irias_seq_open(struct inode *inode, struct file *file)
{
IRDA_ASSERT( irias_objects != NULL, return -EINVAL;);
return seq_open(file, &irias_seq_ops);
}
const struct file_operations irias_seq_fops = {
.owner = THIS_MODULE,
.open = irias_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* PROC_FS */
| /*********************************************************************
*
* Filename: iriap.c
* Version: 0.8
* Description: Information Access Protocol (IAP)
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Thu Aug 21 00:02:07 1997
* Modified at: Sat Dec 25 16:42:42 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
#include <net/irda/irlmp.h>
#include <net/irda/irias_object.h>
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
static const char *const ias_charset_types[] = {
"CS_ASCII",
"CS_ISO_8859_1",
"CS_ISO_8859_2",
"CS_ISO_8859_3",
"CS_ISO_8859_4",
"CS_ISO_8859_5",
"CS_ISO_8859_6",
"CS_ISO_8859_7",
"CS_ISO_8859_8",
"CS_ISO_8859_9",
"CS_UNICODE"
};
#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static void *service_handle;
static void __iriap_close(struct iriap_cb *self);
static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode);
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason, struct sk_buff *skb);
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_sdu_size,
__u8 max_header_size,
struct sk_buff *skb);
static void iriap_connect_confirm(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size, __u8 max_header_size,
struct sk_buff *skb);
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb);
static void iriap_watchdog_timer_expired(void *data);
static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
int timeout)
{
irda_start_timer(&self->watchdog_timer, timeout, self,
iriap_watchdog_timer_expired);
}
/*
* Function iriap_init (void)
*
* Initializes the IrIAP layer, called by the module initialization code
* in irmod.c
*/
int __init iriap_init(void)
{
struct ias_object *obj;
struct iriap_cb *server;
__u8 oct_seq[6];
__u16 hints;
/* Allocate master array */
iriap = hashbin_new(HB_LOCK);
if (!iriap)
return -ENOMEM;
/* Object repository - defined in irias_object.c */
irias_objects = hashbin_new(HB_LOCK);
if (!irias_objects) {
IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n",
__func__);
hashbin_delete(iriap, NULL);
return -ENOMEM;
}
/*
* Register some default services for IrLMP
*/
hints = irlmp_service_to_hint(S_COMPUTER);
service_handle = irlmp_register_service(hints);
/* Register the Device object with LM-IAS */
obj = irias_new_object("Device", IAS_DEVICE_ID);
irias_add_string_attrib(obj, "DeviceName", "Linux", IAS_KERNEL_ATTR);
oct_seq[0] = 0x01; /* Version 1 */
oct_seq[1] = 0x00; /* IAS support bits */
oct_seq[2] = 0x00; /* LM-MUX support bits */
#ifdef CONFIG_IRDA_ULTRA
oct_seq[2] |= 0x04; /* Connectionless Data support */
#endif
irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3,
IAS_KERNEL_ATTR);
irias_insert_object(obj);
/*
* Register server support with IrLMP so we can accept incoming
* connections
*/
server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!server) {
IRDA_DEBUG(0, "%s(), unable to open server\n", __func__);
return -1;
}
iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);
return 0;
}
/*
* Function iriap_cleanup (void)
*
* Shuts down the IrIAP layer, called by the module cleanup code in
* irmod.c
*/
void iriap_cleanup(void)
{
irlmp_unregister_service(service_handle);
hashbin_delete(iriap, (FREE_FUNC) __iriap_close);
hashbin_delete(irias_objects, (FREE_FUNC) __irias_delete_object);
}
/*
* Function iriap_open (void)
*
* Opens an instance of the IrIAP layer, and registers with IrLMP
*/
struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
CONFIRM_CALLBACK callback)
{
struct iriap_cb *self;
IRDA_DEBUG(2, "%s()\n", __func__);
self = kzalloc(sizeof(*self), GFP_ATOMIC);
if (!self) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __func__);
return NULL;
}
/*
* Initialize instance
*/
self->magic = IAS_MAGIC;
self->mode = mode;
if (mode == IAS_CLIENT)
iriap_register_lsap(self, slsap_sel, mode);
self->confirm = callback;
self->priv = priv;
/* iriap_getvaluebyclass_request() will construct packets before
* we connect, so this must have a sane value... Jean II */
self->max_header_size = LMP_MAX_HEADER;
init_timer(&self->watchdog_timer);
hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
/* Initialize state machines */
iriap_next_client_state(self, S_DISCONNECT);
iriap_next_call_state(self, S_MAKE_CALL);
iriap_next_server_state(self, R_DISCONNECT);
iriap_next_r_connect_state(self, R_WAITING);
return self;
}
EXPORT_SYMBOL(iriap_open);
/*
* Function __iriap_close (self)
*
* Removes (deallocates) the IrIAP instance
*
*/
static void __iriap_close(struct iriap_cb *self)
{
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
del_timer(&self->watchdog_timer);
if (self->request_skb)
dev_kfree_skb(self->request_skb);
self->magic = 0;
kfree(self);
}
/*
* Function iriap_close (void)
*
* Closes IrIAP and deregisters with IrLMP
*/
void iriap_close(struct iriap_cb *self)
{
struct iriap_cb *entry;
IRDA_DEBUG(2, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
if (self->lsap) {
irlmp_close_lsap(self->lsap);
self->lsap = NULL;
}
entry = (struct iriap_cb *) hashbin_remove(iriap, (long) self, NULL);
IRDA_ASSERT(entry == self, return;);
__iriap_close(self);
}
EXPORT_SYMBOL(iriap_close);
static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode)
{
notify_t notify;
IRDA_DEBUG(2, "%s()\n", __func__);
irda_notify_init(&notify);
notify.connect_confirm = iriap_connect_confirm;
notify.connect_indication = iriap_connect_indication;
notify.disconnect_indication = iriap_disconnect_indication;
notify.data_indication = iriap_data_indication;
notify.instance = self;
if (mode == IAS_CLIENT)
strcpy(notify.name, "IrIAS cli");
else
strcpy(notify.name, "IrIAS srv");
self->lsap = irlmp_open_lsap(slsap_sel, &notify, 0);
if (self->lsap == NULL) {
IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__);
return -1;
}
self->slsap_sel = self->lsap->slsap_sel;
return 0;
}
/*
* Function iriap_disconnect_indication (handle, reason)
*
* Got disconnect, so clean up everything associated with this connection
*
*/
static void iriap_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *skb)
{
struct iriap_cb *self;
IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(iriap != NULL, return;);
del_timer(&self->watchdog_timer);
/* Not needed */
if (skb)
dev_kfree_skb(skb);
if (self->mode == IAS_CLIENT) {
IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__);
iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
/*
* Inform service user that the request failed by sending
* it a NULL value. Warning, the client might close us, so
* remember not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
} else {
IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__);
iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION,
NULL);
iriap_close(self);
}
}
/*
* Function iriap_disconnect_request (handle)
*/
static void iriap_disconnect_request(struct iriap_cb *self)
{
struct sk_buff *tx_skb;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
if (tx_skb == NULL) {
IRDA_DEBUG(0,
"%s(), Could not allocate an sk_buff of length %d\n",
__func__, LMP_MAX_HEADER);
return;
}
/*
* Reserve space for MUX control and LAP header
*/
skb_reserve(tx_skb, LMP_MAX_HEADER);
irlmp_disconnect_request(self->lsap, tx_skb);
}
/*
* Function iriap_getvaluebyclass (addr, name, attr)
*
* Retrieve all values from attribute in all objects with given class
* name
*/
int iriap_getvaluebyclass_request(struct iriap_cb *self,
__u32 saddr, __u32 daddr,
char *name, char *attr)
{
struct sk_buff *tx_skb;
int name_len, attr_len, skb_len;
__u8 *frame;
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return -1;);
/* Client must supply the destination device address */
if (!daddr)
return -1;
self->daddr = daddr;
self->saddr = saddr;
/*
* Save operation, so we know what the later indication is about
*/
self->operation = GET_VALUE_BY_CLASS;
/* Give ourselves 10 secs to finish this operation */
iriap_start_watchdog_timer(self, 10*HZ);
name_len = strlen(name); /* Up to IAS_MAX_CLASSNAME = 60 */
attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */
skb_len = self->max_header_size+2+name_len+1+attr_len+4;
tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
if (!tx_skb)
return -ENOMEM;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 3+name_len+attr_len);
frame = tx_skb->data;
/* Build frame */
frame[0] = IAP_LST | GET_VALUE_BY_CLASS;
frame[1] = name_len; /* Insert length of name */
memcpy(frame+2, name, name_len); /* Insert name */
frame[2+name_len] = attr_len; /* Insert length of attr */
memcpy(frame+3+name_len, attr, attr_len); /* Insert attr */
iriap_do_client_event(self, IAP_CALL_REQUEST_GVBC, tx_skb);
/* Drop reference count - see state_s_disconnect(). */
dev_kfree_skb(tx_skb);
return 0;
}
EXPORT_SYMBOL(iriap_getvaluebyclass_request);
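/*
 * For reference, a sketch of the GetValueByClass command frame as it is
 * assembled above (derived from this code, not quoted from the IrDA
 * spec):
 *
 *   frame[0]              IAP_LST | GET_VALUE_BY_CLASS
 *   frame[1]              length of the class name
 *   frame[2..]            class name (no NUL terminator on the wire)
 *   frame[2+name_len]     length of the attribute name
 *   frame[3+name_len..]   attribute name (no NUL terminator)
 */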
/*
* Function iriap_getvaluebyclass_confirm (self, skb)
*
* Got result from GetValueByClass command. Parse it and return result
* to service user.
*
*/
static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_value *value;
int charset;
__u32 value_len;
__u32 tmp_cpu32;
__u16 obj_id;
__u16 len;
__u8 type;
__u8 *fp;
int n;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
/* Initialize variables */
fp = skb->data;
n = 2;
/* Get length, MSB first */
len = get_unaligned_be16(fp + n);
n += 2;
IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len);
/* Get object ID, MSB first */
obj_id = get_unaligned_be16(fp + n);
n += 2;
type = fp[n++];
IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type);
switch (type) {
case IAS_INTEGER:
memcpy(&tmp_cpu32, fp+n, 4); n += 4;
be32_to_cpus(&tmp_cpu32);
value = irias_new_integer_value(tmp_cpu32);
/* Legal values restricted to 0x01-0x6f, page 15 irttp */
IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer);
break;
case IAS_STRING:
charset = fp[n++];
switch (charset) {
case CS_ASCII:
break;
/* case CS_ISO_8859_1: */
/* case CS_ISO_8859_2: */
/* case CS_ISO_8859_3: */
/* case CS_ISO_8859_4: */
/* case CS_ISO_8859_5: */
/* case CS_ISO_8859_6: */
/* case CS_ISO_8859_7: */
/* case CS_ISO_8859_8: */
/* case CS_ISO_8859_9: */
/* case CS_UNICODE: */
default:
IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
__func__, ias_charset_types[charset]);
/* Aborting, close connection! */
iriap_disconnect_request(self);
return;
/* break; */
}
value_len = fp[n++];
IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len);
/* Make sure the string is null-terminated */
if (n + value_len < skb->len)
fp[n + value_len] = 0x00;
IRDA_DEBUG(4, "Got string %s\n", fp+n);
/* Will truncate to IAS_MAX_STRING bytes */
value = irias_new_string_value(fp+n);
break;
case IAS_OCT_SEQ:
value_len = get_unaligned_be16(fp + n);
n += 2;
/* Will truncate to IAS_MAX_OCTET_STRING bytes */
value = irias_new_octseq_value(fp+n, value_len);
break;
default:
value = irias_new_missing_value();
break;
}
/* Finished, close connection! */
iriap_disconnect_request(self);
/* Warning, the client might close us, so remember not to use self
* anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_SUCCESS, obj_id, value, self->priv);
else {
IRDA_DEBUG(0, "%s(), missing handler!\n", __func__);
irias_delete_value(value);
}
}
/*
* Function iriap_getvaluebyclass_response ()
*
* Send answer back to remote LM-IAS
*
*/
static void iriap_getvaluebyclass_response(struct iriap_cb *self,
__u16 obj_id,
__u8 ret_code,
struct ias_value *value)
{
struct sk_buff *tx_skb;
int n;
__be32 tmp_be32;
__be16 tmp_be16;
__u8 *fp;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(value != NULL, return;);
IRDA_ASSERT(value->len <= 1024, return;);
/* Initialize variables */
n = 0;
/*
* We must size the response according to the length of the value.
* We add 32 bytes of headroom to cover the 6 bytes of fixed frame
* header and at most 5 bytes of value type coding.
*/
tx_skb = alloc_skb(value->len + self->max_header_size + 32,
GFP_ATOMIC);
if (!tx_skb)
return;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 6);
fp = tx_skb->data;
/* Build frame */
fp[n++] = GET_VALUE_BY_CLASS | IAP_LST;
fp[n++] = ret_code;
/* Insert list length (MSB first) */
tmp_be16 = htons(0x0001);
memcpy(fp+n, &tmp_be16, 2); n += 2;
/* Insert object identifier (MSB first) */
tmp_be16 = cpu_to_be16(obj_id);
memcpy(fp+n, &tmp_be16, 2); n += 2;
switch (value->type) {
case IAS_STRING:
skb_put(tx_skb, 3 + value->len);
fp[n++] = value->type;
fp[n++] = 0; /* ASCII */
fp[n++] = (__u8) value->len;
memcpy(fp+n, value->t.string, value->len); n+=value->len;
break;
case IAS_INTEGER:
skb_put(tx_skb, 5);
fp[n++] = value->type;
tmp_be32 = cpu_to_be32(value->t.integer);
memcpy(fp+n, &tmp_be32, 4); n += 4;
break;
case IAS_OCT_SEQ:
skb_put(tx_skb, 3 + value->len);
fp[n++] = value->type;
tmp_be16 = cpu_to_be16(value->len);
memcpy(fp+n, &tmp_be16, 2); n += 2;
memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len;
break;
case IAS_MISSING:
IRDA_DEBUG(3, "%s: sending IAS_MISSING\n", __func__);
skb_put(tx_skb, 1);
fp[n++] = value->type;
break;
default:
IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__);
break;
}
iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb);
/* Drop reference count - see state_r_execute(). */
dev_kfree_skb(tx_skb);
}
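/*
 * For reference, the GetValueByClass result frame parsed by
 * iriap_getvaluebyclass_confirm() and built by
 * iriap_getvaluebyclass_response(), both above, starts with (derived
 * from this code, not quoted from the IrDA spec):
 *
 *   fp[0]     GET_VALUE_BY_CLASS | IAP_LST
 *   fp[1]     return code
 *   fp[2..3]  list length, MSB first (always 0x0001 here)
 *   fp[4..5]  object identifier, MSB first
 *   fp[6]     value type
 *
 * followed by the type specific encoding: charset byte, length byte and
 * characters for IAS_STRING; a 32 bit big-endian integer for
 * IAS_INTEGER; a 16 bit big-endian length plus octets for IAS_OCT_SEQ;
 * nothing for IAS_MISSING.
 */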
/*
* Function iriap_getvaluebyclass_indication (self, skb)
*
* getvaluebyclass is requested from peer LM-IAS
*
*/
static void iriap_getvaluebyclass_indication(struct iriap_cb *self,
struct sk_buff *skb)
{
struct ias_object *obj;
struct ias_attrib *attrib;
int name_len;
int attr_len;
char name[IAS_MAX_CLASSNAME + 1]; /* 60 bytes */
char attr[IAS_MAX_ATTRIBNAME + 1]; /* 60 bytes */
__u8 *fp;
int n;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
n = 1;
name_len = fp[n++];
IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;);
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;);
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr);
obj = irias_find_object(name);
if (obj == NULL) {
IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name);
iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN,
&irias_missing);
return;
}
IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id);
attrib = irias_find_attrib(obj, attr);
if (attrib == NULL) {
IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr);
iriap_getvaluebyclass_response(self, obj->id,
IAS_ATTRIB_UNKNOWN,
&irias_missing);
return;
}
/* We have a match; send the value. */
iriap_getvaluebyclass_response(self, obj->id, IAS_SUCCESS,
attrib->value);
}
/*
* Function iriap_send_ack (void)
*
* Currently not used
*
*/
void iriap_send_ack(struct iriap_cb *self)
{
struct sk_buff *tx_skb;
__u8 *frame;
IRDA_DEBUG(2, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
tx_skb = alloc_skb(LMP_MAX_HEADER + 1, GFP_ATOMIC);
if (!tx_skb)
return;
/* Reserve space for MUX and LAP header */
skb_reserve(tx_skb, self->max_header_size);
skb_put(tx_skb, 1);
frame = tx_skb->data;
/* Build frame */
frame[0] = IAP_LST | IAP_ACK | self->operation;
irlmp_data_request(self->lsap, tx_skb);
}
void iriap_connect_request(struct iriap_cb *self)
{
int ret;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
ret = irlmp_connect_request(self->lsap, LSAP_IAS,
self->saddr, self->daddr,
NULL, NULL);
if (ret < 0) {
IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
self->confirm(IAS_DISCONNECT, 0, NULL, self->priv);
}
}
/*
* Function iriap_connect_confirm (handle, skb)
*
* LSAP connection confirmed!
*
*/
static void iriap_connect_confirm(void *instance, void *sap,
struct qos_info *qos, __u32 max_seg_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct iriap_cb *self;
self = (struct iriap_cb *) instance;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
self->max_data_size = max_seg_size;
self->max_header_size = max_header_size;
del_timer(&self->watchdog_timer);
iriap_do_client_event(self, IAP_LM_CONNECT_CONFIRM, skb);
/* Drop reference count - see state_s_make_call(). */
dev_kfree_skb(skb);
}
/*
* Function iriap_connect_indication ( handle, skb)
*
* Remote LM-IAS is requesting connection
*
*/
static void iriap_connect_indication(void *instance, void *sap,
struct qos_info *qos, __u32 max_seg_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct iriap_cb *self, *new;
IRDA_DEBUG(1, "%s()\n", __func__);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(skb != NULL, return;);
IRDA_ASSERT(self != NULL, goto out;);
IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
/* Start new server */
new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
if (!new) {
IRDA_DEBUG(0, "%s(), open failed\n", __func__);
goto out;
}
/* Now attach up the new "socket" */
new->lsap = irlmp_dup(self->lsap, new);
if (!new->lsap) {
IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
goto out;
}
new->max_data_size = max_seg_size;
new->max_header_size = max_header_size;
/* Clean up the original one to keep it in listen state */
irlmp_listen(self->lsap);
iriap_do_server_event(new, IAP_LM_CONNECT_INDICATION, skb);
out:
/* Drop reference count - see state_r_disconnect(). */
dev_kfree_skb(skb);
}
/*
* Function iriap_data_indication (handle, skb)
*
* Receives data from connection identified by handle from IrLMP
*
*/
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb)
{
struct iriap_cb *self;
__u8 *frame;
__u8 opcode;
IRDA_DEBUG(3, "%s()\n", __func__);
self = (struct iriap_cb *) instance;
IRDA_ASSERT(skb != NULL, return 0;);
IRDA_ASSERT(self != NULL, goto out;);
IRDA_ASSERT(self->magic == IAS_MAGIC, goto out;);
frame = skb->data;
if (self->mode == IAS_SERVER) {
/* Call server */
IRDA_DEBUG(4, "%s(), Calling server!\n", __func__);
iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb);
goto out;
}
opcode = frame[0];
if (~opcode & IAP_LST) {
IRDA_WARNING("%s: IrIAS multiframe commands or "
"results are not implemented yet!\n",
__func__);
goto out;
}
/* Check for ack frames since they don't contain any data */
if (opcode & IAP_ACK) {
IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__);
goto out;
}
opcode &= ~IAP_LST; /* Mask away LST bit */
switch (opcode) {
case GET_INFO_BASE:
IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n");
break;
case GET_VALUE_BY_CLASS:
iriap_do_call_event(self, IAP_RECV_F_LST, NULL);
switch (frame[1]) {
case IAS_SUCCESS:
iriap_getvaluebyclass_confirm(self, skb);
break;
case IAS_CLASS_UNKNOWN:
IRDA_DEBUG(1, "%s(), No such class!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
/*
* Warning, the client might close us, so remember
* not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_CLASS_UNKNOWN, 0, NULL,
self->priv);
break;
case IAS_ATTRIB_UNKNOWN:
IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__);
/* Finished, close connection! */
iriap_disconnect_request(self);
/*
* Warning, the client might close us, so remember
* not to use self anymore after calling confirm
*/
if (self->confirm)
self->confirm(IAS_ATTRIB_UNKNOWN, 0, NULL,
self->priv);
break;
}
break;
default:
IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__,
opcode);
break;
}
out:
/* Cleanup - sub-calls will have done skb_get() as needed. */
dev_kfree_skb(skb);
return 0;
}
/*
* Function iriap_call_indication (self, skb)
*
* Received call to server from peer LM-IAS
*
*/
void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
{
__u8 *fp;
__u8 opcode;
IRDA_DEBUG(4, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
IRDA_ASSERT(skb != NULL, return;);
fp = skb->data;
opcode = fp[0];
if (~opcode & 0x80) {
IRDA_WARNING("%s: IrIAS multiframe commands or results "
"are not implemented yet!\n", __func__);
return;
}
opcode &= 0x7f; /* Mask away LST bit */
switch (opcode) {
case GET_INFO_BASE:
IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n",
__func__);
break;
case GET_VALUE_BY_CLASS:
iriap_getvaluebyclass_indication(self, skb);
break;
}
/* skb will be cleaned up in iriap_data_indication */
}
/*
* Function iriap_watchdog_timer_expired (data)
*
* Query has taken too long, so abort
*
*/
static void iriap_watchdog_timer_expired(void *data)
{
struct iriap_cb *self = (struct iriap_cb *) data;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
/* iriap_close(self); */
}
#ifdef CONFIG_PROC_FS
static const char *const ias_value_types[] = {
"IAS_MISSING",
"IAS_INTEGER",
"IAS_OCT_SEQ",
"IAS_STRING"
};
static inline struct ias_object *irias_seq_idx(loff_t pos)
{
struct ias_object *obj;
for (obj = (struct ias_object *) hashbin_get_first(irias_objects);
obj; obj = (struct ias_object *) hashbin_get_next(irias_objects)) {
if (pos-- == 0)
break;
}
return obj;
}
static void *irias_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_irq(&irias_objects->hb_spinlock);
return *pos ? irias_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *irias_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN)
? (void *) hashbin_get_first(irias_objects)
: (void *) hashbin_get_next(irias_objects);
}
static void irias_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_irq(&irias_objects->hb_spinlock);
}
static int irias_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "LM-IAS Objects:\n");
else {
struct ias_object *obj = v;
struct ias_attrib *attrib;
IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -EINVAL;);
seq_printf(seq, "name: %s, id=%d\n",
obj->name, obj->id);
/* Careful about priority inversions here!
* All other uses of attrib spinlock are independent of
* the object spinlock, so we are safe. Jean II */
spin_lock(&obj->attribs->hb_spinlock);
/* List all attributes for this object */
for (attrib = (struct ias_attrib *) hashbin_get_first(obj->attribs);
attrib != NULL;
attrib = (struct ias_attrib *) hashbin_get_next(obj->attribs)) {
IRDA_ASSERT(attrib->magic == IAS_ATTRIB_MAGIC,
goto outloop; );
seq_printf(seq, " - Attribute name: \"%s\", ",
attrib->name);
seq_printf(seq, "value[%s]: ",
ias_value_types[attrib->value->type]);
switch (attrib->value->type) {
case IAS_INTEGER:
seq_printf(seq, "%d\n",
attrib->value->t.integer);
break;
case IAS_STRING:
seq_printf(seq, "\"%s\"\n",
attrib->value->t.string);
break;
case IAS_OCT_SEQ:
seq_printf(seq, "octet sequence (%d bytes)\n",
attrib->value->len);
break;
case IAS_MISSING:
seq_puts(seq, "missing\n");
break;
default:
seq_printf(seq, "type %d?\n",
attrib->value->type);
}
seq_putc(seq, '\n');
}
IRDA_ASSERT_LABEL(outloop:)
spin_unlock(&obj->attribs->hb_spinlock);
}
return 0;
}
static const struct seq_operations irias_seq_ops = {
.start = irias_seq_start,
.next = irias_seq_next,
.stop = irias_seq_stop,
.show = irias_seq_show,
};
static int irias_seq_open(struct inode *inode, struct file *file)
{
IRDA_ASSERT( irias_objects != NULL, return -EINVAL;);
return seq_open(file, &irias_seq_ops);
}
const struct file_operations irias_seq_fops = {
.owner = THIS_MODULE,
.open = irias_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* PROC_FS */
|
68,422,682,842,186 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose);
/*
* This routine purges all of the queues of frames.
*/
void rose_clear_queues(struct sock *sk)
{
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void rose_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct rose_sock *rose = rose_sk(sk);
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (rose->va != nr) {
while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) {
skb = skb_dequeue(&rose->ack_queue);
kfree_skb(skb);
rose->va = (rose->va + 1) % ROSE_MODULUS;
}
}
}
void rose_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by rose_kick. This arrangement handles the possibility of an
* empty output queue.
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
* Validate that the value of nr is between va and vs. Return true or
* false for testing.
*/
int rose_validate_nr(struct sock *sk, unsigned short nr)
{
struct rose_sock *rose = rose_sk(sk);
unsigned short vc = rose->va;
while (vc != rose->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % ROSE_MODULUS;
}
return nr == rose->vs;
}
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void rose_write_internal(struct sock *sk, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
char buffer[100];
int len, faclen = 0;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
faclen = rose_create_facilities(buffer, rose);
len += faclen;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_RESET_REQUEST:
len += 2;
break;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
/*
* Space for AX.25 header and PID.
*/
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
dptr = skb_put(skb, skb_tailroom(skb));
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
switch (frametype) {
case ROSE_CALL_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0xAA;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, buffer, faclen);
dptr += faclen;
break;
case ROSE_CALL_ACCEPTED:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0x00; /* Address length */
*dptr++ = 0; /* Facilities length */
break;
case ROSE_CLEAR_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = rose->cause;
*dptr++ = rose->diagnostic;
break;
case ROSE_RESET_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
break;
case ROSE_RR:
case ROSE_RNR:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr = frametype;
*dptr++ |= (rose->vr << 5) & 0xE0;
break;
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_CONFIRMATION:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
break;
default:
printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype);
kfree_skb(skb);
return;
}
rose_transmit_link(skb, rose->neighbour);
}
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
{
unsigned char *frame;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
case ROSE_CALL_REQUEST:
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_REQUEST:
case ROSE_RESET_CONFIRMATION:
return frame[2];
default:
break;
}
if ((frame[2] & 0x1F) == ROSE_RR ||
(frame[2] & 0x1F) == ROSE_RNR) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
if ((frame[2] & 0x01) == ROSE_DATA) {
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return ROSE_DATA;
}
return ROSE_ILLEGAL;
}
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT)
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
else
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
int rose_parse_facilities(unsigned char *p,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0)
return 0;
while (facilities_len > 0) {
if (*p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
facilities_len -= len + 1;
p += len + 1;
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
facilities_len--;
p++;
break;
}
} else
break; /* Error in facilities format */
}
return 1;
}
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
{
unsigned char *p = buffer + 1;
char *callsign;
char buf[11];
int len, nb;
/* National Facilities */
if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) {
*p++ = 0x00;
*p++ = FAC_NATIONAL;
if (rose->rand != 0) {
*p++ = FAC_NATIONAL_RAND;
*p++ = (rose->rand >> 8) & 0xFF;
*p++ = (rose->rand >> 0) & 0xFF;
}
/* Sent before older facilities */
if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) {
int maxdigi = 0;
*p++ = FAC_NATIONAL_DIGIS;
*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
for (nb = 0 ; nb < rose->source_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p[6] |= AX25_HBIT;
p += AX25_ADDR_LEN;
}
for (nb = 0 ; nb < rose->dest_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p[6] &= ~AX25_HBIT;
p += AX25_ADDR_LEN;
}
}
/* For compatibility */
if (rose->source_ndigis > 0) {
*p++ = FAC_NATIONAL_SRC_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
/* For compatibility */
if (rose->dest_ndigis > 0) {
*p++ = FAC_NATIONAL_DEST_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
}
*p++ = 0x00;
*p++ = FAC_CCITT;
*p++ = FAC_CCITT_DEST_NSAP;
callsign = ax2asc(buf, &rose->dest_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
*p++ = FAC_CCITT_SRC_NSAP;
callsign = ax2asc(buf, &rose->source_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
len = p - buffer;
buffer[0] = len - 1;
return len;
}
void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
{
struct rose_sock *rose = rose_sk(sk);
rose_stop_timer(sk);
rose_stop_idletimer(sk);
rose_clear_queues(sk);
rose->lci = 0;
rose->state = ROSE_STATE_0;
if (cause != -1)
rose->cause = cause;
if (diagnostic != -1)
rose->diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
| /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose);
/*
* This routine purges all of the queues of frames.
*/
void rose_clear_queues(struct sock *sk)
{
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void rose_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct rose_sock *rose = rose_sk(sk);
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (rose->va != nr) {
while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) {
skb = skb_dequeue(&rose->ack_queue);
kfree_skb(skb);
rose->va = (rose->va + 1) % ROSE_MODULUS;
}
}
}
void rose_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by rose_kick. This arrangement handles the possibility of an
* empty output queue.
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
* Validate that the value of nr is between va and vs. Return true or
* false for testing.
*/
int rose_validate_nr(struct sock *sk, unsigned short nr)
{
struct rose_sock *rose = rose_sk(sk);
unsigned short vc = rose->va;
while (vc != rose->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % ROSE_MODULUS;
}
return nr == rose->vs;
}
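/*
 * Worked example for the check above: with ROSE_MODULUS 8, va = 6 and
 * vs = 1, the accepted N(R) values are 6, 7, 0 and 1; any other value
 * makes rose_validate_nr() return 0.
 */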
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void rose_write_internal(struct sock *sk, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
char buffer[100];
int len, faclen = 0;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
faclen = rose_create_facilities(buffer, rose);
len += faclen;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_RESET_REQUEST:
len += 2;
break;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
/*
* Space for AX.25 header and PID.
*/
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
dptr = skb_put(skb, skb_tailroom(skb));
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
switch (frametype) {
case ROSE_CALL_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0xAA;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, buffer, faclen);
dptr += faclen;
break;
case ROSE_CALL_ACCEPTED:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0x00; /* Address length */
*dptr++ = 0; /* Facilities length */
break;
case ROSE_CLEAR_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = rose->cause;
*dptr++ = rose->diagnostic;
break;
case ROSE_RESET_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
break;
case ROSE_RR:
case ROSE_RNR:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr = frametype;
*dptr++ |= (rose->vr << 5) & 0xE0;
break;
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_CONFIRMATION:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
break;
default:
printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype);
kfree_skb(skb);
return;
}
rose_transmit_link(skb, rose->neighbour);
}
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
{
unsigned char *frame;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
case ROSE_CALL_REQUEST:
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_REQUEST:
case ROSE_RESET_CONFIRMATION:
return frame[2];
default:
break;
}
if ((frame[2] & 0x1F) == ROSE_RR ||
(frame[2] & 0x1F) == ROSE_RNR) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
if ((frame[2] & 0x01) == ROSE_DATA) {
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return ROSE_DATA;
}
return ROSE_ILLEGAL;
}
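/*
 * For reference, a summary of how rose_decode() above reads frame[2]
 * (derived from this code, not quoted from the X.25/ROSE spec):
 * unnumbered packets (call, clear and reset plus their confirmations)
 * are returned verbatim; for RR/RNR the low five bits select the type
 * and N(R) sits in bits 5-7; for DATA packets bit 0 is zero, N(S) is
 * in bits 1-3, the M bit is 0x10, N(R) is in bits 5-7, and the Q and D
 * bits are taken from frame[0].
 */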
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT) {
if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
} else {
if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
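/*
 * For reference, the facility encoding classes handled by
 * rose_parse_national() above and rose_parse_ccitt() below (derived
 * from these parsers): the top two bits of the facility code select the
 * class - 0x00 is a code plus one parameter byte, 0x40 a code plus two
 * bytes, 0x80 a code plus three bytes, and 0xC0 a code followed by a
 * length byte and that many bytes of data.
 */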
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
/* Prevent overflows */
if (l < 10 || l > 20)
return -1;
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
int rose_parse_facilities(unsigned char *p,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0)
return 0;
while (facilities_len > 0) {
if (*p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
facilities_len--;
p++;
break;
}
} else
break; /* Error in facilities format */
}
return 1;
}
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
{
unsigned char *p = buffer + 1;
char *callsign;
char buf[11];
int len, nb;
/* National Facilities */
if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) {
*p++ = 0x00;
*p++ = FAC_NATIONAL;
if (rose->rand != 0) {
*p++ = FAC_NATIONAL_RAND;
*p++ = (rose->rand >> 8) & 0xFF;
*p++ = (rose->rand >> 0) & 0xFF;
}
/* Sent before older facilities */
if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) {
int maxdigi = 0;
*p++ = FAC_NATIONAL_DIGIS;
*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
for (nb = 0 ; nb < rose->source_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p[6] |= AX25_HBIT;
p += AX25_ADDR_LEN;
}
for (nb = 0 ; nb < rose->dest_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p[6] &= ~AX25_HBIT;
p += AX25_ADDR_LEN;
}
}
/* For compatibility */
if (rose->source_ndigis > 0) {
*p++ = FAC_NATIONAL_SRC_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
/* For compatibility */
if (rose->dest_ndigis > 0) {
*p++ = FAC_NATIONAL_DEST_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
}
*p++ = 0x00;
*p++ = FAC_CCITT;
*p++ = FAC_CCITT_DEST_NSAP;
callsign = ax2asc(buf, &rose->dest_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
*p++ = FAC_CCITT_SRC_NSAP;
callsign = ax2asc(buf, &rose->source_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
len = p - buffer;
buffer[0] = len - 1;
return len;
}
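/*
 * For reference, the facilities block produced by
 * rose_create_facilities() above: buffer[0] holds the number of bytes
 * that follow, then each family starts with a 0x00 marker and a family
 * code (FAC_NATIONAL or FAC_CCITT) followed by its parameters, e.g.
 * FAC_NATIONAL_RAND with a 16 bit value, FAC_NATIONAL_DIGIS with the
 * digipeater list, and FAC_CCITT_DEST_NSAP/FAC_CCITT_SRC_NSAP carrying
 * the ROSE address and callsign.
 */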
void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
{
struct rose_sock *rose = rose_sk(sk);
rose_stop_timer(sk);
rose_stop_idletimer(sk);
rose_clear_queues(sk);
rose->lci = 0;
rose->state = ROSE_STATE_0;
if (cause != -1)
rose->cause = cause;
if (diagnostic != -1)
rose->diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
|
191,199,674,742,568 | /*
* Declarations of Rose type objects.
*
* Jonathan Naylor G4KLX 25/8/96
*/
#ifndef _ROSE_H
#define _ROSE_H
#include <linux/rose.h>
#include <net/sock.h>
#define ROSE_ADDR_LEN 5
#define ROSE_MIN_LEN 3
#define ROSE_GFI 0x10
#define ROSE_Q_BIT 0x80
#define ROSE_D_BIT 0x40
#define ROSE_M_BIT 0x10
#define ROSE_CALL_REQUEST 0x0B
#define ROSE_CALL_ACCEPTED 0x0F
#define ROSE_CLEAR_REQUEST 0x13
#define ROSE_CLEAR_CONFIRMATION 0x17
#define ROSE_DATA 0x00
#define ROSE_INTERRUPT 0x23
#define ROSE_INTERRUPT_CONFIRMATION 0x27
#define ROSE_RR 0x01
#define ROSE_RNR 0x05
#define ROSE_REJ 0x09
#define ROSE_RESET_REQUEST 0x1B
#define ROSE_RESET_CONFIRMATION 0x1F
#define ROSE_REGISTRATION_REQUEST 0xF3
#define ROSE_REGISTRATION_CONFIRMATION 0xF7
#define ROSE_RESTART_REQUEST 0xFB
#define ROSE_RESTART_CONFIRMATION 0xFF
#define ROSE_DIAGNOSTIC 0xF1
#define ROSE_ILLEGAL 0xFD
/* Define Link State constants. */
enum {
ROSE_STATE_0, /* Ready */
ROSE_STATE_1, /* Awaiting Call Accepted */
ROSE_STATE_2, /* Awaiting Clear Confirmation */
ROSE_STATE_3, /* Data Transfer */
ROSE_STATE_4, /* Awaiting Reset Confirmation */
ROSE_STATE_5 /* Deferred Call Acceptance */
};
#define ROSE_DEFAULT_T0 180000 /* Default T10 T20 value */
#define ROSE_DEFAULT_T1 200000 /* Default T11 T21 value */
#define ROSE_DEFAULT_T2 180000 /* Default T12 T22 value */
#define ROSE_DEFAULT_T3 180000 /* Default T13 T23 value */
#define ROSE_DEFAULT_HB 5000 /* Default Holdback value */
#define ROSE_DEFAULT_IDLE 0 /* No Activity Timeout - none */
#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
#define ROSE_DEFAULT_FAIL_TIMEOUT 120000 /* Time until link considered usable */
#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window size */
#define ROSE_MODULUS 8
#define ROSE_MAX_PACKET_SIZE 251 /* Maximum packet size */
#define ROSE_COND_ACK_PENDING 0x01
#define ROSE_COND_PEER_RX_BUSY 0x02
#define ROSE_COND_OWN_RX_BUSY 0x04
#define FAC_NATIONAL 0x00
#define FAC_CCITT 0x0F
#define FAC_NATIONAL_RAND 0x7F
#define FAC_NATIONAL_FLAGS 0x3F
#define FAC_NATIONAL_DEST_DIGI 0xE9
#define FAC_NATIONAL_SRC_DIGI 0xEB
#define FAC_NATIONAL_FAIL_CALL 0xED
#define FAC_NATIONAL_FAIL_ADD 0xEE
#define FAC_NATIONAL_DIGIS 0xEF
#define FAC_CCITT_DEST_NSAP 0xC9
#define FAC_CCITT_SRC_NSAP 0xCB
struct rose_neigh {
struct rose_neigh *next;
ax25_address callsign;
ax25_digi *digipeat;
ax25_cb *ax25;
struct net_device *dev;
unsigned short count;
unsigned short use;
unsigned int number;
char restarted;
char dce_mode;
char loopback;
struct sk_buff_head queue;
struct timer_list t0timer;
struct timer_list ftimer;
};
struct rose_node {
struct rose_node *next;
rose_address address;
unsigned short mask;
unsigned char count;
char loopback;
struct rose_neigh *neighbour[3];
};
struct rose_route {
struct rose_route *next;
unsigned int lci1, lci2;
rose_address src_addr, dest_addr;
ax25_address src_call, dest_call;
struct rose_neigh *neigh1, *neigh2;
unsigned int rand;
};
struct rose_sock {
struct sock sock;
rose_address source_addr, dest_addr;
ax25_address source_call, dest_call;
unsigned char source_ndigis, dest_ndigis;
ax25_address source_digis[ROSE_MAX_DIGIS];
ax25_address dest_digis[ROSE_MAX_DIGIS];
struct rose_neigh *neighbour;
struct net_device *device;
unsigned int lci, rand;
unsigned char state, condition, qbitincl, defer;
unsigned char cause, diagnostic;
unsigned short vs, vr, va, vl;
unsigned long t1, t2, t3, hb, idle;
#ifdef M_BIT
unsigned short fraglen;
struct sk_buff_head frag_queue;
#endif
struct sk_buff_head ack_queue;
struct rose_facilities_struct facilities;
struct timer_list timer;
struct timer_list idletimer;
};
#define rose_sk(sk) ((struct rose_sock *)(sk))
/* af_rose.c */
extern ax25_address rose_callsign;
extern int sysctl_rose_restart_request_timeout;
extern int sysctl_rose_call_request_timeout;
extern int sysctl_rose_reset_request_timeout;
extern int sysctl_rose_clear_request_timeout;
extern int sysctl_rose_no_activity_timeout;
extern int sysctl_rose_ack_hold_back_timeout;
extern int sysctl_rose_routing_control;
extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
extern int rosecmp(rose_address *, rose_address *);
extern int rosecmpm(rose_address *, rose_address *, unsigned short);
extern char *rose2asc(char *buf, const rose_address *);
extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
extern void rose_kill_by_neigh(struct rose_neigh *);
extern unsigned int rose_new_lci(struct rose_neigh *);
extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
extern void rose_destroy_socket(struct sock *);
/* rose_dev.c */
extern void rose_setup(struct net_device *);
/* rose_in.c */
extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
/* rose_link.c */
extern void rose_start_ftimer(struct rose_neigh *);
extern void rose_stop_ftimer(struct rose_neigh *);
extern void rose_stop_t0timer(struct rose_neigh *);
extern int rose_ftimer_running(struct rose_neigh *);
extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
/* rose_loopback.c */
extern void rose_loopback_init(void);
extern void rose_loopback_clear(void);
extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
/* rose_out.c */
extern void rose_kick(struct sock *);
extern void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
extern const struct file_operations rose_neigh_fops;
extern const struct file_operations rose_nodes_fops;
extern const struct file_operations rose_routes_fops;
extern void rose_add_loopback_neigh(void);
extern int __must_check rose_add_loopback_node(rose_address *);
extern void rose_del_loopback_node(rose_address *);
extern void rose_rt_device_down(struct net_device *);
extern void rose_link_device_down(struct net_device *);
extern struct net_device *rose_dev_first(void);
extern struct net_device *rose_dev_get(rose_address *);
extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
extern int rose_rt_ioctl(unsigned int, void __user *);
extern void rose_link_failed(ax25_cb *, int);
extern int rose_route_frame(struct sk_buff *, ax25_cb *);
extern void rose_rt_free(void);
/* rose_subr.c */
extern void rose_clear_queues(struct sock *);
extern void rose_frames_acked(struct sock *, unsigned short);
extern void rose_requeue_frames(struct sock *);
extern int rose_validate_nr(struct sock *, unsigned short);
extern void rose_write_internal(struct sock *, int);
extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
extern void rose_disconnect(struct sock *, int, int, int);
/* rose_timer.c */
extern void rose_start_heartbeat(struct sock *);
extern void rose_start_t1timer(struct sock *);
extern void rose_start_t2timer(struct sock *);
extern void rose_start_t3timer(struct sock *);
extern void rose_start_hbtimer(struct sock *);
extern void rose_start_idletimer(struct sock *);
extern void rose_stop_heartbeat(struct sock *);
extern void rose_stop_timer(struct sock *);
extern void rose_stop_idletimer(struct sock *);
/* sysctl_net_rose.c */
extern void rose_register_sysctl(void);
extern void rose_unregister_sysctl(void);
#endif
| /*
* Declarations of Rose type objects.
*
* Jonathan Naylor G4KLX 25/8/96
*/
#ifndef _ROSE_H
#define _ROSE_H
#include <linux/rose.h>
#include <net/sock.h>
#define ROSE_ADDR_LEN 5
#define ROSE_MIN_LEN 3
#define ROSE_CALL_REQ_ADDR_LEN_OFF 3
#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */
#define ROSE_CALL_REQ_DEST_ADDR_OFF 4
#define ROSE_CALL_REQ_SRC_ADDR_OFF 9
#define ROSE_CALL_REQ_FACILITIES_OFF 14
#define ROSE_GFI 0x10
#define ROSE_Q_BIT 0x80
#define ROSE_D_BIT 0x40
#define ROSE_M_BIT 0x10
#define ROSE_CALL_REQUEST 0x0B
#define ROSE_CALL_ACCEPTED 0x0F
#define ROSE_CLEAR_REQUEST 0x13
#define ROSE_CLEAR_CONFIRMATION 0x17
#define ROSE_DATA 0x00
#define ROSE_INTERRUPT 0x23
#define ROSE_INTERRUPT_CONFIRMATION 0x27
#define ROSE_RR 0x01
#define ROSE_RNR 0x05
#define ROSE_REJ 0x09
#define ROSE_RESET_REQUEST 0x1B
#define ROSE_RESET_CONFIRMATION 0x1F
#define ROSE_REGISTRATION_REQUEST 0xF3
#define ROSE_REGISTRATION_CONFIRMATION 0xF7
#define ROSE_RESTART_REQUEST 0xFB
#define ROSE_RESTART_CONFIRMATION 0xFF
#define ROSE_DIAGNOSTIC 0xF1
#define ROSE_ILLEGAL 0xFD
/* Define Link State constants. */
enum {
ROSE_STATE_0, /* Ready */
ROSE_STATE_1, /* Awaiting Call Accepted */
ROSE_STATE_2, /* Awaiting Clear Confirmation */
ROSE_STATE_3, /* Data Transfer */
ROSE_STATE_4, /* Awaiting Reset Confirmation */
ROSE_STATE_5 /* Deferred Call Acceptance */
};
#define ROSE_DEFAULT_T0 180000 /* Default T10 T20 value */
#define ROSE_DEFAULT_T1 200000 /* Default T11 T21 value */
#define ROSE_DEFAULT_T2 180000 /* Default T12 T22 value */
#define ROSE_DEFAULT_T3 180000 /* Default T13 T23 value */
#define ROSE_DEFAULT_HB 5000 /* Default Holdback value */
#define ROSE_DEFAULT_IDLE 0 /* No Activity Timeout - none */
#define ROSE_DEFAULT_ROUTING 1 /* Default routing flag */
#define ROSE_DEFAULT_FAIL_TIMEOUT 120000 /* Time until link considered usable */
#define ROSE_DEFAULT_MAXVC 50 /* Maximum number of VCs per neighbour */
#define ROSE_DEFAULT_WINDOW_SIZE 7 /* Default window size */
#define ROSE_MODULUS 8
#define ROSE_MAX_PACKET_SIZE 251 /* Maximum packet size */
#define ROSE_COND_ACK_PENDING 0x01
#define ROSE_COND_PEER_RX_BUSY 0x02
#define ROSE_COND_OWN_RX_BUSY 0x04
#define FAC_NATIONAL 0x00
#define FAC_CCITT 0x0F
#define FAC_NATIONAL_RAND 0x7F
#define FAC_NATIONAL_FLAGS 0x3F
#define FAC_NATIONAL_DEST_DIGI 0xE9
#define FAC_NATIONAL_SRC_DIGI 0xEB
#define FAC_NATIONAL_FAIL_CALL 0xED
#define FAC_NATIONAL_FAIL_ADD 0xEE
#define FAC_NATIONAL_DIGIS 0xEF
#define FAC_CCITT_DEST_NSAP 0xC9
#define FAC_CCITT_SRC_NSAP 0xCB
struct rose_neigh {
struct rose_neigh *next;
ax25_address callsign;
ax25_digi *digipeat;
ax25_cb *ax25;
struct net_device *dev;
unsigned short count;
unsigned short use;
unsigned int number;
char restarted;
char dce_mode;
char loopback;
struct sk_buff_head queue;
struct timer_list t0timer;
struct timer_list ftimer;
};
struct rose_node {
struct rose_node *next;
rose_address address;
unsigned short mask;
unsigned char count;
char loopback;
struct rose_neigh *neighbour[3];
};
struct rose_route {
struct rose_route *next;
unsigned int lci1, lci2;
rose_address src_addr, dest_addr;
ax25_address src_call, dest_call;
struct rose_neigh *neigh1, *neigh2;
unsigned int rand;
};
struct rose_sock {
struct sock sock;
rose_address source_addr, dest_addr;
ax25_address source_call, dest_call;
unsigned char source_ndigis, dest_ndigis;
ax25_address source_digis[ROSE_MAX_DIGIS];
ax25_address dest_digis[ROSE_MAX_DIGIS];
struct rose_neigh *neighbour;
struct net_device *device;
unsigned int lci, rand;
unsigned char state, condition, qbitincl, defer;
unsigned char cause, diagnostic;
unsigned short vs, vr, va, vl;
unsigned long t1, t2, t3, hb, idle;
#ifdef M_BIT
unsigned short fraglen;
struct sk_buff_head frag_queue;
#endif
struct sk_buff_head ack_queue;
struct rose_facilities_struct facilities;
struct timer_list timer;
struct timer_list idletimer;
};
#define rose_sk(sk) ((struct rose_sock *)(sk))
/* af_rose.c */
extern ax25_address rose_callsign;
extern int sysctl_rose_restart_request_timeout;
extern int sysctl_rose_call_request_timeout;
extern int sysctl_rose_reset_request_timeout;
extern int sysctl_rose_clear_request_timeout;
extern int sysctl_rose_no_activity_timeout;
extern int sysctl_rose_ack_hold_back_timeout;
extern int sysctl_rose_routing_control;
extern int sysctl_rose_link_fail_timeout;
extern int sysctl_rose_maximum_vcs;
extern int sysctl_rose_window_size;
extern int rosecmp(rose_address *, rose_address *);
extern int rosecmpm(rose_address *, rose_address *, unsigned short);
extern char *rose2asc(char *buf, const rose_address *);
extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
extern void rose_kill_by_neigh(struct rose_neigh *);
extern unsigned int rose_new_lci(struct rose_neigh *);
extern int rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
extern void rose_destroy_socket(struct sock *);
/* rose_dev.c */
extern void rose_setup(struct net_device *);
/* rose_in.c */
extern int rose_process_rx_frame(struct sock *, struct sk_buff *);
/* rose_link.c */
extern void rose_start_ftimer(struct rose_neigh *);
extern void rose_stop_ftimer(struct rose_neigh *);
extern void rose_stop_t0timer(struct rose_neigh *);
extern int rose_ftimer_running(struct rose_neigh *);
extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
/* rose_loopback.c */
extern void rose_loopback_init(void);
extern void rose_loopback_clear(void);
extern int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
/* rose_out.c */
extern void rose_kick(struct sock *);
extern void rose_enquiry_response(struct sock *);
/* rose_route.c */
extern struct rose_neigh *rose_loopback_neigh;
extern const struct file_operations rose_neigh_fops;
extern const struct file_operations rose_nodes_fops;
extern const struct file_operations rose_routes_fops;
extern void rose_add_loopback_neigh(void);
extern int __must_check rose_add_loopback_node(rose_address *);
extern void rose_del_loopback_node(rose_address *);
extern void rose_rt_device_down(struct net_device *);
extern void rose_link_device_down(struct net_device *);
extern struct net_device *rose_dev_first(void);
extern struct net_device *rose_dev_get(rose_address *);
extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
extern int rose_rt_ioctl(unsigned int, void __user *);
extern void rose_link_failed(ax25_cb *, int);
extern int rose_route_frame(struct sk_buff *, ax25_cb *);
extern void rose_rt_free(void);
/* rose_subr.c */
extern void rose_clear_queues(struct sock *);
extern void rose_frames_acked(struct sock *, unsigned short);
extern void rose_requeue_frames(struct sock *);
extern int rose_validate_nr(struct sock *, unsigned short);
extern void rose_write_internal(struct sock *, int);
extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
extern void rose_disconnect(struct sock *, int, int, int);
/* rose_timer.c */
extern void rose_start_heartbeat(struct sock *);
extern void rose_start_t1timer(struct sock *);
extern void rose_start_t2timer(struct sock *);
extern void rose_start_t3timer(struct sock *);
extern void rose_start_hbtimer(struct sock *);
extern void rose_start_idletimer(struct sock *);
extern void rose_stop_heartbeat(struct sock *);
extern void rose_stop_timer(struct sock *);
extern void rose_stop_idletimer(struct sock *);
/* sysctl_net_rose.c */
extern void rose_register_sysctl(void);
extern void rose_unregister_sysctl(void);
#endif
|
83,821,377,722,439 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
static int rose_ndevs = 10;
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);
static const struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
* ROSE network devices are virtual network devices encapsulating ROSE
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;
static void rose_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}
static void rose_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
/*
* Convert a ROSE address into text.
*/
char *rose2asc(char *buf, const rose_address *addr)
{
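	/* An all-zero address is shown as "*"; otherwise the five packed
	 * address bytes are printed as ten digits. */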
if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
addr->rose_addr[4] == 0x00) {
strcpy(buf, "*");
} else {
sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
addr->rose_addr[1] & 0xFF,
addr->rose_addr[2] & 0xFF,
addr->rose_addr[3] & 0xFF,
addr->rose_addr[4] & 0xFF);
}
return buf;
}
/*
* Compare two ROSE addresses, 0 == equal.
*/
int rosecmp(rose_address *addr1, rose_address *addr2)
{
int i;
for (i = 0; i < 5; i++)
if (addr1->rose_addr[i] != addr2->rose_addr[i])
return 1;
return 0;
}
/*
* Compare two ROSE addresses for only mask digits, 0 == equal.
*/
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
unsigned int i, j;
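	/* A ROSE address holds ten BCD digits packed two per byte, first
	 * digit in the high nibble; compare only the leading 'mask' digits. */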
if (mask > 10)
return 1;
for (i = 0; i < mask; i++) {
j = i / 2;
if ((i % 2) != 0) {
if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
return 1;
} else {
if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
return 1;
}
}
return 0;
}
/*
* Socket removal during an interrupt is now safe.
*/
static void rose_remove_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a broken link layer connection to a
* particular neighbour.
*/
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->neighbour = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->device = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Handle device status changes.
*/
static int rose_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
switch (dev->type) {
case ARPHRD_ROSE:
rose_kill_by_device(dev);
break;
case ARPHRD_AX25:
rose_link_device_down(dev);
rose_rt_device_down(dev);
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void rose_insert_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_add_node(sk, &rose_list);
spin_unlock_bh(&rose_list_lock);
}
/*
* Find a socket that wants to accept the Call Request we just
* received.
*/
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
struct sock *s;
struct hlist_node *node;
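	/* Prefer a listener bound to this exact address and callsign (with no
	 * digipeaters); otherwise fall back to one bound to the address with a
	 * null (wildcard) callsign. */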
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
!rose->source_ndigis && s->sk_state == TCP_LISTEN)
goto found;
}
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
s->sk_state == TCP_LISTEN)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a connected ROSE socket given my LCI and device.
*/
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a unique LCI for a given device.
*/
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
int lci;
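	/* As DCE hand out the lowest free LCI, as DTE the highest, so the two
	 * ends of the link search from opposite directions. */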
if (neigh->dce_mode) {
for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
} else {
for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
}
return 0;
}
/*
* Deferred destroy.
*/
void rose_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void rose_destroy_timer(unsigned long data)
{
rose_destroy_socket((struct sock *)data);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void rose_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
rose_remove_socket(sk);
rose_stop_heartbeat(sk);
rose_stop_idletimer(sk);
rose_stop_timer(sk);
rose_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
rose_start_heartbeat(skb->sk);
rose_sk(skb->sk)->state = ROSE_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
setup_timer(&sk->sk_timer, rose_destroy_timer,
(unsigned long)sk);
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* ROSE socket object.
*/
static int rose_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int opt;
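	/* Timer options are supplied in seconds (ROSE_IDLE in minutes) and
	 * stored internally in jiffies. */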
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(opt, (int __user *)optval))
return -EFAULT;
switch (optname) {
case ROSE_DEFER:
rose->defer = opt ? 1 : 0;
return 0;
case ROSE_T1:
if (opt < 1)
return -EINVAL;
rose->t1 = opt * HZ;
return 0;
case ROSE_T2:
if (opt < 1)
return -EINVAL;
rose->t2 = opt * HZ;
return 0;
case ROSE_T3:
if (opt < 1)
return -EINVAL;
rose->t3 = opt * HZ;
return 0;
case ROSE_HOLDBACK:
if (opt < 1)
return -EINVAL;
rose->hb = opt * HZ;
return 0;
case ROSE_IDLE:
if (opt < 0)
return -EINVAL;
rose->idle = opt * 60 * HZ;
return 0;
case ROSE_QBITINCL:
rose->qbitincl = opt ? 1 : 0;
return 0;
default:
return -ENOPROTOOPT;
}
}
static int rose_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int val = 0;
int len;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case ROSE_DEFER:
val = rose->defer;
break;
case ROSE_T1:
val = rose->t1 / HZ;
break;
case ROSE_T2:
val = rose->t2 / HZ;
break;
case ROSE_T3:
val = rose->t3 / HZ;
break;
case ROSE_HOLDBACK:
val = rose->hb / HZ;
break;
case ROSE_IDLE:
val = rose->idle / (60 * HZ);
break;
case ROSE_QBITINCL:
val = rose->qbitincl;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
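		/* A listening socket has no peer yet; clear any stale destination
		 * so incoming Call Requests are matched on the bound source only. */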
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
return 0;
}
return -EOPNOTSUPP;
}
static struct proto rose_proto = {
.name = "ROSE",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rose_sock),
};
static int rose_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct rose_sock *rose;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return -ENOMEM;
rose = rose_sk(sk);
sock_init_data(sock, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sock->ops = &rose_proto_ops;
sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
rose->state = ROSE_STATE_0;
return 0;
}
static struct sock *rose_make_new(struct sock *osk)
{
struct sock *sk;
struct rose_sock *rose, *orose;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return NULL;
rose = rose_sk(sk);
sock_init_data(NULL, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
init_timer(&rose->timer);
init_timer(&rose->idletimer);
orose = rose_sk(osk);
rose->t1 = orose->t1;
rose->t2 = orose->t2;
rose->t3 = orose->t3;
rose->hb = orose->hb;
rose->idle = orose->idle;
rose->defer = orose->defer;
rose->device = orose->device;
rose->qbitincl = orose->qbitincl;
return sk;
}
static int rose_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rose_sock *rose;
if (sk == NULL) return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
rose = rose_sk(sk);
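	/* Idle and clearing sockets can be torn down at once; connected ones
	 * must first send a Clear Request and wait in state 2 for the
	 * confirmation. */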
switch (rose->state) {
case ROSE_STATE_0:
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_2:
rose->neighbour->use--;
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_1:
case ROSE_STATE_3:
case ROSE_STATE_4:
case ROSE_STATE_5:
rose_clear_queues(sk);
rose_stop_idletimer(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
int n;
if (!sock_flag(sk, SOCK_ZAPPED))
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
return -EADDRNOTAVAIL;
}
source = &addr->srose_call;
user = ax25_findbyuid(current_euid());
if (user) {
rose->source_call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
rose->source_call = *source;
}
rose->source_addr = addr->srose_addr;
rose->device = dev;
rose->source_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->source_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->source_ndigis == 1) {
rose->source_digis[0] = addr->srose_digi;
}
}
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
SOCK_DEBUG(sk, "ROSE: socket is bound\n");
return 0;
}
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
struct net_device *dev;
ax25_uid_assoc *user;
int n, err = 0;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during an ERESTARTSYS event */
sock->state = SS_CONNECTED;
goto out_release;
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
/* No reconnect on a seqpacket socket */
err = -EISCONN;
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
&diagnostic, 0);
if (!rose->neighbour) {
err = -ENETUNREACH;
goto out_release;
}
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = rose_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
goto out_release;
}
memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
rose->source_call = user->call;
rose->device = dev;
ax25_uid_put(user);
rose_insert_socket(sk); /* Finish the bind */
}
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
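	/* Pick a per-call 'rand' value to carry in the Call Request
	 * facilities; it only needs to vary from call to call. */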
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
rose->dest_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->dest_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->dest_ndigis == 1) {
rose->dest_digis[0] = addr->srose_digi;
}
}
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
rose->neighbour->use++;
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
	/*
	 * The receive queue at this point holds the new sockets created by
	 * rose_rx_call_request(), each hooked onto the Call Request skb we saved.
	 */
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
sk->sk_ack_backlog--;
out_release:
release_sock(sk);
return err;
}
static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int n;
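	/* Whichever end is asked for, reply with the full digipeater-capable
	 * sockaddr layout. */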
memset(srose, 0, sizeof(*srose));
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
for (n = 0; n < rose->dest_ndigis; n++)
srose->srose_digis[n] = rose->dest_digis[n];
} else {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->source_addr;
srose->srose_call = rose->source_call;
srose->srose_ndigis = rose->source_ndigis;
for (n = 0; n < rose->source_ndigis; n++)
srose->srose_digis[n] = rose->source_digis[n];
}
*uaddr_len = sizeof(struct full_sockaddr_rose);
return 0;
}
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
struct sock *sk;
struct sock *make;
struct rose_sock *make_rose;
struct rose_facilities_struct facilities;
int n, len;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the rose frame start
*/
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
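	/* Byte 3 of the Call Request carries the two address lengths in
	 * digits, one per nibble; convert each to a byte count (two digits per
	 * byte, rounded up) to find where the facilities begin. */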
len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
return 0;
}
sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
/*
* We can't accept the Call Request.
*/
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
make_rose->dest_addr = facilities.dest_addr;
make_rose->dest_call = facilities.dest_call;
make_rose->dest_ndigis = facilities.dest_ndigis;
for (n = 0 ; n < facilities.dest_ndigis ; n++)
make_rose->dest_digis[n] = facilities.dest_digis[n];
make_rose->source_addr = facilities.source_addr;
make_rose->source_call = facilities.source_call;
make_rose->source_ndigis = facilities.source_ndigis;
for (n = 0 ; n < facilities.source_ndigis ; n++)
make_rose->source_digis[n]= facilities.source_digis[n];
make_rose->neighbour = neigh;
make_rose->device = dev;
make_rose->facilities = facilities;
make_rose->neighbour->use++;
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
} else {
rose_write_internal(make, ROSE_CALL_ACCEPTED);
make_rose->state = ROSE_STATE_3;
rose_start_idletimer(make);
}
make_rose->condition = 0x00;
make_rose->vs = 0;
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
sk->sk_ack_backlog++;
rose_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
return 1;
}
static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
int err;
struct full_sockaddr_rose srose;
struct sk_buff *skb;
unsigned char *asmptr;
int n, size, qbit = 0;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
if (sock_flag(sk, SOCK_ZAPPED))
return -EADDRNOTAVAIL;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
if (rose->neighbour == NULL || rose->device == NULL)
return -ENETUNREACH;
if (usrose != NULL) {
if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
return -EINVAL;
memset(&srose, 0, sizeof(struct full_sockaddr_rose));
memcpy(&srose, usrose, msg->msg_namelen);
if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
return -EISCONN;
if (srose.srose_ndigis != rose->dest_ndigis)
return -EISCONN;
if (srose.srose_ndigis == rose->dest_ndigis) {
for (n = 0 ; n < srose.srose_ndigis ; n++)
if (ax25cmp(&rose->dest_digis[n],
&srose.srose_digis[n]))
return -EISCONN;
}
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
srose.srose_addr = rose->dest_addr;
srose.srose_call = rose->dest_call;
srose.srose_ndigis = rose->dest_ndigis;
for (n = 0 ; n < rose->dest_ndigis ; n++)
srose.srose_digis[n] = rose->dest_digis[n];
}
SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
/* Build a packet */
SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
/* Sanity check the packet size */
if (len > 65535)
return -EMSGSIZE;
size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
return err;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
/*
* Put the data on the end
*/
SOCK_DEBUG(sk, "ROSE: Appending user data\n");
skb_reset_transport_header(skb);
skb_put(skb, len);
err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
return err;
}
/*
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
if (rose->qbitincl) {
qbit = skb->data[0];
skb_pull(skb, 1);
}
/*
* Push down the ROSE header
*/
asmptr = skb_push(skb, ROSE_MIN_LEN);
SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
/* Build a ROSE Network header */
asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
asmptr[1] = (rose->lci >> 0) & 0xFF;
asmptr[2] = ROSE_DATA;
if (qbit)
asmptr[0] |= ROSE_Q_BIT;
SOCK_DEBUG(sk, "ROSE: Built header.\n");
SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
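	/* Writes longer than ROSE_PACLEN are split into separate packets, each
	 * carrying a copy of the three-byte header; every fragment except the
	 * last has the M (more data) bit set. */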
if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
unsigned char header[ROSE_MIN_LEN];
struct sk_buff *skbn;
int frontlen;
int lg;
/* Save a copy of the Header */
skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
skb_pull(skb, ROSE_MIN_LEN);
frontlen = skb_headroom(skb);
while (skb->len > 0) {
if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
kfree_skb(skb);
return err;
}
skbn->sk = sk;
skbn->free = 1;
skbn->arp = 1;
skb_reserve(skbn, frontlen);
lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
/* Copy the user data */
skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skb_pull(skb, lg);
/* Duplicate the Header */
skb_push(skbn, ROSE_MIN_LEN);
skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
if (skb->len > 0)
skbn->data[2] |= M_BIT;
skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb);
} else {
skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
return len;
}
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
} else {
if (rose->dest_ndigis >= 1) {
srose->srose_ndigis = 1;
srose->srose_digi = rose->dest_digis[0];
}
msg->msg_namelen = sizeof(struct sockaddr_rose);
}
}
skb_free_datagram(sk, skb);
return copied;
}
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int __user *) argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int __user *) argp);
}
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *) argp);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, (struct timespec __user *) argp);
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRSCLRRT:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return rose_rt_ioctl(cmd, argp);
case SIOCRSGCAUSE: {
struct rose_cause_struct rose_cause;
rose_cause.cause = rose->cause;
rose_cause.diagnostic = rose->diagnostic;
return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
}
case SIOCRSSCAUSE: {
struct rose_cause_struct rose_cause;
if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
return -EFAULT;
rose->cause = rose_cause.cause;
rose->diagnostic = rose_cause.diagnostic;
return 0;
}
case SIOCRSSL2CALL:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
return -EFAULT;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
return ax25_listen_register(&rose_callsign, NULL);
return 0;
case SIOCRSGL2CALL:
return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
case SIOCRSACCEPT:
if (rose->state == ROSE_STATE_5) {
rose_write_internal(sk, ROSE_CALL_ACCEPTED);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
}
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
spin_lock_bh(&rose_list_lock);
return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
__releases(rose_list_lock)
{
spin_unlock_bh(&rose_list_lock);
}
static int rose_info_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
if (!dev)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-10s %-9s ",
rose2asc(rsbuf, &rose->dest_addr),
ax2asc(buf, &rose->dest_call));
if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
callsign = "??????-?";
else
callsign = ax2asc(buf, &rose->source_call);
seq_printf(seq,
"%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
rose2asc(rsbuf, &rose->source_addr),
callsign,
devname,
rose->lci & 0x0FFF,
(rose->neighbour) ? rose->neighbour->number : 0,
rose->state,
rose->vs,
rose->vr,
rose->va,
ax25_display_timer(&rose->timer) / HZ,
rose->t1 / HZ,
rose->t2 / HZ,
rose->t3 / HZ,
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
}
return 0;
}
static const struct seq_operations rose_info_seqops = {
.start = rose_info_start,
.next = rose_info_next,
.stop = rose_info_stop,
.show = rose_info_show,
};
static int rose_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_info_seqops);
}
static const struct file_operations rose_info_fops = {
.owner = THIS_MODULE,
.open = rose_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family rose_family_ops = {
.family = PF_ROSE,
.create = rose_create,
.owner = THIS_MODULE,
};
static const struct proto_ops rose_proto_ops = {
.family = PF_ROSE,
.owner = THIS_MODULE,
.release = rose_release,
.bind = rose_bind,
.connect = rose_connect,
.socketpair = sock_no_socketpair,
.accept = rose_accept,
.getname = rose_getname,
.poll = datagram_poll,
.ioctl = rose_ioctl,
.listen = rose_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rose_setsockopt,
.getsockopt = rose_getsockopt,
.sendmsg = rose_sendmsg,
.recvmsg = rose_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block rose_dev_notifier = {
.notifier_call = rose_device_event,
};
static struct net_device **dev_rose;
static struct ax25_protocol rose_pid = {
.pid = AX25_P_ROSE,
.func = rose_route_frame
};
static struct ax25_linkfail rose_linkfail_notifier = {
.func = rose_link_failed
};
static int __init rose_proto_init(void)
{
int i;
int rc;
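	/* Bound rose_ndevs so the kzalloc of the pointer array below cannot
	 * overflow. */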
if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
rc = -EINVAL;
goto out;
}
rc = proto_register(&rose_proto, 0);
if (rc != 0)
goto out;
rose_callsign = null_ax25_address;
dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "rose%d", i);
dev = alloc_netdev(0, name, rose_setup);
if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
rc = -ENOMEM;
goto fail;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "ROSE: netdevice registration failed\n");
free_netdev(dev);
goto fail;
}
rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
sock_register(&rose_family_ops);
register_netdevice_notifier(&rose_dev_notifier);
ax25_register_pid(&rose_pid);
ax25_linkfail_register(&rose_linkfail_notifier);
#ifdef CONFIG_SYSCTL
rose_register_sysctl();
#endif
rose_loopback_init();
rose_add_loopback_neigh();
proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
free_netdev(dev_rose[i]);
}
kfree(dev_rose);
out_proto_unregister:
proto_unregister(&rose_proto);
goto out;
}
module_init(rose_proto_init);
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);
static void __exit rose_exit(void)
{
int i;
proc_net_remove(&init_net, "rose");
proc_net_remove(&init_net, "rose_neigh");
proc_net_remove(&init_net, "rose_nodes");
proc_net_remove(&init_net, "rose_routes");
rose_loopback_clear();
rose_rt_free();
ax25_protocol_release(AX25_P_ROSE);
ax25_linkfail_release(&rose_linkfail_notifier);
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
#ifdef CONFIG_SYSCTL
rose_unregister_sysctl();
#endif
unregister_netdevice_notifier(&rose_dev_notifier);
sock_unregister(PF_ROSE);
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev = dev_rose[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_rose);
proto_unregister(&rose_proto);
}
module_exit(rose_exit);
| /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>
static int rose_ndevs = 10;
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);
static const struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
* ROSE network devices are virtual network devices encapsulating ROSE
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;
static void rose_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}
static void rose_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
/*
* Convert a ROSE address into text.
*/
char *rose2asc(char *buf, const rose_address *addr)
{
if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
addr->rose_addr[4] == 0x00) {
strcpy(buf, "*");
} else {
sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
addr->rose_addr[1] & 0xFF,
addr->rose_addr[2] & 0xFF,
addr->rose_addr[3] & 0xFF,
addr->rose_addr[4] & 0xFF);
}
return buf;
}
/*
* Compare two ROSE addresses, 0 == equal.
*/
int rosecmp(rose_address *addr1, rose_address *addr2)
{
int i;
for (i = 0; i < 5; i++)
if (addr1->rose_addr[i] != addr2->rose_addr[i])
return 1;
return 0;
}
/*
* Compare two ROSE addresses for only mask digits, 0 == equal.
*/
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
unsigned int i, j;
if (mask > 10)
return 1;
for (i = 0; i < mask; i++) {
j = i / 2;
if ((i % 2) != 0) {
if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
return 1;
} else {
if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
return 1;
}
}
return 0;
}
/*
* Socket removal during an interrupt is now safe.
*/
static void rose_remove_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a broken link layer connection to a
* particular neighbour.
*/
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->neighbour = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Kill all bound sockets on a dropped device.
*/
static void rose_kill_by_device(struct net_device *dev)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->device == dev) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
rose->neighbour->use--;
rose->device = NULL;
}
}
spin_unlock_bh(&rose_list_lock);
}
/*
* Handle device status changes.
*/
static int rose_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = (struct net_device *)ptr;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
switch (dev->type) {
case ARPHRD_ROSE:
rose_kill_by_device(dev);
break;
case ARPHRD_AX25:
rose_link_device_down(dev);
rose_rt_device_down(dev);
break;
}
return NOTIFY_DONE;
}
/*
* Add a socket to the bound sockets list.
*/
static void rose_insert_socket(struct sock *sk)
{
spin_lock_bh(&rose_list_lock);
sk_add_node(sk, &rose_list);
spin_unlock_bh(&rose_list_lock);
}
/*
* Find a socket that wants to accept the Call Request we just
* received.
*/
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, call) &&
!rose->source_ndigis && s->sk_state == TCP_LISTEN)
goto found;
}
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (!rosecmp(&rose->source_addr, addr) &&
!ax25cmp(&rose->source_call, &null_ax25_address) &&
s->sk_state == TCP_LISTEN)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a connected ROSE socket given my LCI and device.
*/
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
struct sock *s;
struct hlist_node *node;
spin_lock_bh(&rose_list_lock);
sk_for_each(s, node, &rose_list) {
struct rose_sock *rose = rose_sk(s);
if (rose->lci == lci && rose->neighbour == neigh)
goto found;
}
s = NULL;
found:
spin_unlock_bh(&rose_list_lock);
return s;
}
/*
* Find a unique LCI for a given device.
*/
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
int lci;
if (neigh->dce_mode) {
for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
} else {
for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
return lci;
}
return 0;
}
/*
* Deferred destroy.
*/
void rose_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void rose_destroy_timer(unsigned long data)
{
rose_destroy_socket((struct sock *)data);
}
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void rose_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
rose_remove_socket(sk);
rose_stop_heartbeat(sk);
rose_stop_idletimer(sk);
rose_stop_timer(sk);
rose_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
rose_start_heartbeat(skb->sk);
rose_sk(skb->sk)->state = ROSE_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
setup_timer(&sk->sk_timer, rose_destroy_timer,
(unsigned long)sk);
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
/*
* Handling for system calls applied via the various interfaces to a
* ROSE socket object.
*/
static int rose_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int opt;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(opt, (int __user *)optval))
return -EFAULT;
switch (optname) {
case ROSE_DEFER:
rose->defer = opt ? 1 : 0;
return 0;
case ROSE_T1:
if (opt < 1)
return -EINVAL;
rose->t1 = opt * HZ;
return 0;
case ROSE_T2:
if (opt < 1)
return -EINVAL;
rose->t2 = opt * HZ;
return 0;
case ROSE_T3:
if (opt < 1)
return -EINVAL;
rose->t3 = opt * HZ;
return 0;
case ROSE_HOLDBACK:
if (opt < 1)
return -EINVAL;
rose->hb = opt * HZ;
return 0;
case ROSE_IDLE:
if (opt < 0)
return -EINVAL;
rose->idle = opt * 60 * HZ;
return 0;
case ROSE_QBITINCL:
rose->qbitincl = opt ? 1 : 0;
return 0;
default:
return -ENOPROTOOPT;
}
}
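#if 0 /* Added user-space sketch, not part of this file: how the options
 * handled by rose_setsockopt() above are set from an application.
 * SOL_ROSE, ROSE_T1 and ROSE_QBITINCL are assumed to come from the
 * glibc <netrose/rose.h> header. */
#include <sys/socket.h>
#include <netrose/rose.h>
static int rose_tune(int fd)
{
int t1 = 200; /* seconds; the kernel converts to jiffies */
int qbit = 1; /* first data byte carries the Q bit value */
if (setsockopt(fd, SOL_ROSE, ROSE_T1, &t1, sizeof(t1)) < 0)
return -1;
return setsockopt(fd, SOL_ROSE, ROSE_QBITINCL, &qbit, sizeof(qbit));
}
#endif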
static int rose_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int val = 0;
int len;
if (level != SOL_ROSE)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case ROSE_DEFER:
val = rose->defer;
break;
case ROSE_T1:
val = rose->t1 / HZ;
break;
case ROSE_T2:
val = rose->t2 / HZ;
break;
case ROSE_T3:
val = rose->t3 / HZ;
break;
case ROSE_HOLDBACK:
val = rose->hb / HZ;
break;
case ROSE_IDLE:
val = rose->idle / (60 * HZ);
break;
case ROSE_QBITINCL:
val = rose->qbitincl;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
static int rose_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_LISTEN) {
struct rose_sock *rose = rose_sk(sk);
rose->dest_ndigis = 0;
memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
memset(&rose->dest_call, 0, AX25_ADDR_LEN);
memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
return 0;
}
return -EOPNOTSUPP;
}
static struct proto rose_proto = {
.name = "ROSE",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rose_sock),
};
static int rose_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct rose_sock *rose;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return -ENOMEM;
rose = rose_sk(sk);
sock_init_data(sock, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sock->ops = &rose_proto_ops;
sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
rose->state = ROSE_STATE_0;
return 0;
}
static struct sock *rose_make_new(struct sock *osk)
{
struct sock *sk;
struct rose_sock *rose, *orose;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
if (sk == NULL)
return NULL;
rose = rose_sk(sk);
sock_init_data(NULL, sk);
skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
skb_queue_head_init(&rose->frag_queue);
rose->fraglen = 0;
#endif
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
init_timer(&rose->timer);
init_timer(&rose->idletimer);
orose = rose_sk(osk);
rose->t1 = orose->t1;
rose->t2 = orose->t2;
rose->t3 = orose->t3;
rose->hb = orose->hb;
rose->idle = orose->idle;
rose->defer = orose->defer;
rose->device = orose->device;
rose->qbitincl = orose->qbitincl;
return sk;
}
static int rose_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rose_sock *rose;
if (sk == NULL)
return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
rose = rose_sk(sk);
switch (rose->state) {
case ROSE_STATE_0:
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_2:
rose->neighbour->use--;
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
rose_destroy_socket(sk);
break;
case ROSE_STATE_1:
case ROSE_STATE_3:
case ROSE_STATE_4:
case ROSE_STATE_5:
rose_clear_queues(sk);
rose_stop_idletimer(sk);
rose_write_internal(sk, ROSE_CLEAR_REQUEST);
rose_start_t3timer(sk);
rose->state = ROSE_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
struct net_device *dev;
ax25_address *source;
ax25_uid_assoc *user;
int n;
if (!sock_flag(sk, SOCK_ZAPPED))
return -EINVAL;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
return -EADDRNOTAVAIL;
}
source = &addr->srose_call;
user = ax25_findbyuid(current_euid());
if (user) {
rose->source_call = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
rose->source_call = *source;
}
rose->source_addr = addr->srose_addr;
rose->device = dev;
rose->source_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->source_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->source_ndigis == 1) {
rose->source_digis[0] = addr->srose_digi;
}
}
rose_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
SOCK_DEBUG(sk, "ROSE: socket is bound\n");
return 0;
}
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
unsigned char cause, diagnostic;
struct net_device *dev;
ax25_uid_assoc *user;
int n, err = 0;
if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
return -EINVAL;
if (addr->srose_family != AF_ROSE)
return -EINVAL;
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
/* Connect completed during a ERESTARTSYS event */
sock->state = SS_CONNECTED;
goto out_release;
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
/* No reconnect on a seqpacket socket */
err = -EISCONN;
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
&diagnostic, 0);
if (!rose->neighbour) {
err = -ENETUNREACH;
goto out_release;
}
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding here may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = rose_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
goto out_release;
}
memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
rose->source_call = user->call;
rose->device = dev;
ax25_uid_put(user);
rose_insert_socket(sk); /* Finish the bind */
}
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
rose->dest_ndigis = addr->srose_ndigis;
if (addr_len == sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
for (n = 0 ; n < addr->srose_ndigis ; n++)
rose->dest_digis[n] = full_addr->srose_digis[n];
} else {
if (rose->dest_ndigis == 1) {
rose->dest_digis[0] = addr->srose_digi;
}
}
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
rose->state = ROSE_STATE_1;
rose->neighbour->use++;
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
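#if 0 /* Added user-space sketch with hypothetical values: builds the
 * sockaddr_rose that rose_connect() above parses. rose_aton() and
 * ax25_aton_entry() are assumed to come from libax25; the address
 * and callsign are placeholders. */
#include <string.h>
#include <sys/socket.h>
#include <netrose/rose.h>
#include <netax25/axlib.h>
static int rose_dial(int fd)
{
struct sockaddr_rose raddr;
memset(&raddr, 0, sizeof(raddr));
raddr.srose_family = AF_ROSE;
rose_aton("3505887211", raddr.srose_addr.rose_addr); /* 10 BCD digits */
ax25_aton_entry("G4KLX-5", raddr.srose_call.ax25_call);
raddr.srose_ndigis = 0; /* no digipeaters */
return connect(fd, (struct sockaddr *)&raddr, sizeof(raddr));
}
#endif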
static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The receive queue is holding sockets ready to use,
* hooked onto the Call Request we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
skb->sk = NULL;
kfree_skb(skb);
sk->sk_ack_backlog--;
out_release:
release_sock(sk);
return err;
}
static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
int n;
memset(srose, 0, sizeof(*srose));
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
for (n = 0; n < rose->dest_ndigis; n++)
srose->srose_digis[n] = rose->dest_digis[n];
} else {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->source_addr;
srose->srose_call = rose->source_call;
srose->srose_ndigis = rose->source_ndigis;
for (n = 0; n < rose->source_ndigis; n++)
srose->srose_digis[n] = rose->source_digis[n];
}
*uaddr_len = sizeof(struct full_sockaddr_rose);
return 0;
}
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
struct sock *sk;
struct sock *make;
struct rose_sock *make_rose;
struct rose_facilities_struct facilities;
int n;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the rose frame start
*/
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
return 0;
}
sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
/*
* We can't accept the Call Request.
*/
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = rose_make_new(sk)) == NULL) {
rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
return 0;
}
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
make_rose = rose_sk(make);
make_rose->lci = lci;
make_rose->dest_addr = facilities.dest_addr;
make_rose->dest_call = facilities.dest_call;
make_rose->dest_ndigis = facilities.dest_ndigis;
for (n = 0 ; n < facilities.dest_ndigis ; n++)
make_rose->dest_digis[n] = facilities.dest_digis[n];
make_rose->source_addr = facilities.source_addr;
make_rose->source_call = facilities.source_call;
make_rose->source_ndigis = facilities.source_ndigis;
for (n = 0 ; n < facilities.source_ndigis ; n++)
make_rose->source_digis[n] = facilities.source_digis[n];
make_rose->neighbour = neigh;
make_rose->device = dev;
make_rose->facilities = facilities;
make_rose->neighbour->use++;
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
} else {
rose_write_internal(make, ROSE_CALL_ACCEPTED);
make_rose->state = ROSE_STATE_3;
rose_start_idletimer(make);
}
make_rose->condition = 0x00;
make_rose->vs = 0;
make_rose->va = 0;
make_rose->vr = 0;
make_rose->vl = 0;
sk->sk_ack_backlog++;
rose_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
return 1;
}
static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
int err;
struct full_sockaddr_rose srose;
struct sk_buff *skb;
unsigned char *asmptr;
int n, size, qbit = 0;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
if (sock_flag(sk, SOCK_ZAPPED))
return -EADDRNOTAVAIL;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
if (rose->neighbour == NULL || rose->device == NULL)
return -ENETUNREACH;
if (usrose != NULL) {
if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
return -EINVAL;
memset(&srose, 0, sizeof(struct full_sockaddr_rose));
memcpy(&srose, usrose, msg->msg_namelen);
if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
return -EISCONN;
if (srose.srose_ndigis != rose->dest_ndigis)
return -EISCONN;
if (srose.srose_ndigis == rose->dest_ndigis) {
for (n = 0 ; n < srose.srose_ndigis ; n++)
if (ax25cmp(&rose->dest_digis[n],
&srose.srose_digis[n]))
return -EISCONN;
}
if (srose.srose_family != AF_ROSE)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
srose.srose_family = AF_ROSE;
srose.srose_addr = rose->dest_addr;
srose.srose_call = rose->dest_call;
srose.srose_ndigis = rose->dest_ndigis;
for (n = 0 ; n < rose->dest_ndigis ; n++)
srose.srose_digis[n] = rose->dest_digis[n];
}
SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
/* Build a packet */
SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
/* Sanity check the packet size */
if (len > 65535)
return -EMSGSIZE;
size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
return err;
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
/*
* Put the data on the end
*/
SOCK_DEBUG(sk, "ROSE: Appending user data\n");
skb_reset_transport_header(skb);
skb_put(skb, len);
err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
return err;
}
/*
* If the Q BIT Include socket option is in force, the first
* byte of the user data is the logical value of the Q Bit.
*/
if (rose->qbitincl) {
qbit = skb->data[0];
skb_pull(skb, 1);
}
/*
* Push down the ROSE header
*/
asmptr = skb_push(skb, ROSE_MIN_LEN);
SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
/* Build a ROSE Network header */
asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
asmptr[1] = (rose->lci >> 0) & 0xFF;
asmptr[2] = ROSE_DATA;
if (qbit)
asmptr[0] |= ROSE_Q_BIT;
SOCK_DEBUG(sk, "ROSE: Built header.\n");
SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
return -ENOTCONN;
}
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
unsigned char header[ROSE_MIN_LEN];
struct sk_buff *skbn;
int frontlen;
int lg;
/* Save a copy of the Header */
skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
skb_pull(skb, ROSE_MIN_LEN);
frontlen = skb_headroom(skb);
while (skb->len > 0) {
if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
kfree_skb(skb);
return err;
}
skbn->sk = sk;
skbn->free = 1;
skbn->arp = 1;
skb_reserve(skbn, frontlen);
lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
/* Copy the user data */
skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
skb_pull(skb, lg);
/* Duplicate the Header */
skb_push(skbn, ROSE_MIN_LEN);
skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
if (skb->len > 0)
skbn->data[2] |= M_BIT;
skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
}
skb->free = 1;
kfree_skb(skb);
} else {
skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
}
#else
skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif
rose_kick(sk);
return len;
}
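/*
 * Added note on the three-byte header built above; it follows the X.25
 * packet-layer layout the rest of this file parses:
 *
 *	data[0]	ROSE_GFI (plus ROSE_Q_BIT when requested) in the high
 *		nibble, LCI bits 11..8 in the low nibble
 *	data[1]	LCI bits 7..0
 *	data[2]	packet type, ROSE_DATA for user data
 *
 * which matches the receive-side extraction
 * lci = ((data[0] << 8) & 0xF00) + (data[1] & 0x0FF).
 */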
static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
size_t copied;
unsigned char *asmptr;
struct sk_buff *skb;
int n, er, qbit;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first, though.
*/
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
return er;
qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
skb_pull(skb, ROSE_MIN_LEN);
if (rose->qbitincl) {
asmptr = skb_push(skb, 1);
*asmptr = qbit;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
for (n = 0 ; n < rose->dest_ndigis ; n++)
full_srose->srose_digis[n] = rose->dest_digis[n];
msg->msg_namelen = sizeof(struct full_sockaddr_rose);
} else {
if (rose->dest_ndigis >= 1) {
srose->srose_ndigis = 1;
srose->srose_digi = rose->dest_digis[0];
}
msg->msg_namelen = sizeof(struct sockaddr_rose);
}
}
skb_free_datagram(sk, skb);
return copied;
}
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct rose_sock *rose = rose_sk(sk);
void __user *argp = (void __user *)arg;
switch (cmd) {
case TIOCOUTQ: {
long amount;
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
return put_user(amount, (unsigned int __user *) argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
return put_user(amount, (unsigned int __user *) argp);
}
case SIOCGSTAMP:
return sock_get_timestamp(sk, (struct timeval __user *) argp);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, (struct timespec __user *) argp);
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRSCLRRT:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return rose_rt_ioctl(cmd, argp);
case SIOCRSGCAUSE: {
struct rose_cause_struct rose_cause;
rose_cause.cause = rose->cause;
rose_cause.diagnostic = rose->diagnostic;
return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
}
case SIOCRSSCAUSE: {
struct rose_cause_struct rose_cause;
if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
return -EFAULT;
rose->cause = rose_cause.cause;
rose->diagnostic = rose_cause.diagnostic;
return 0;
}
case SIOCRSSL2CALL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
return -EFAULT;
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
return ax25_listen_register(&rose_callsign, NULL);
return 0;
case SIOCRSGL2CALL:
return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
case SIOCRSACCEPT:
if (rose->state == ROSE_STATE_5) {
rose_write_internal(sk, ROSE_CALL_ACCEPTED);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
}
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
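#if 0 /* Added user-space sketch: the ROSE_DEFER / SIOCRSACCEPT pairing
 * handled above. With ROSE_DEFER set on the listener, an accepted
 * call sits in ROSE_STATE_5 until the application confirms it with
 * SIOCRSACCEPT. Constants assumed from <netrose/rose.h>. */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netrose/rose.h>
static int accept_deferred(int listen_fd)
{
int fd = accept(listen_fd, NULL, NULL);
if (fd >= 0 && ioctl(fd, SIOCRSACCEPT, NULL) < 0) {
close(fd);
return -1;
}
return fd;
}
#endif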
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
spin_lock_bh(&rose_list_lock);
return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
__releases(rose_list_lock)
{
spin_unlock_bh(&rose_list_lock);
}
static int rose_info_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
if (!dev)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-10s %-9s ",
rose2asc(rsbuf, &rose->dest_addr),
ax2asc(buf, &rose->dest_call));
if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
callsign = "??????-?";
else
callsign = ax2asc(buf, &rose->source_call);
seq_printf(seq,
"%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
rose2asc(rsbuf, &rose->source_addr),
callsign,
devname,
rose->lci & 0x0FFF,
(rose->neighbour) ? rose->neighbour->number : 0,
rose->state,
rose->vs,
rose->vr,
rose->va,
ax25_display_timer(&rose->timer) / HZ,
rose->t1 / HZ,
rose->t2 / HZ,
rose->t3 / HZ,
rose->hb / HZ,
ax25_display_timer(&rose->idletimer) / (60 * HZ),
rose->idle / (60 * HZ),
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
}
return 0;
}
static const struct seq_operations rose_info_seqops = {
.start = rose_info_start,
.next = rose_info_next,
.stop = rose_info_stop,
.show = rose_info_show,
};
static int rose_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_info_seqops);
}
static const struct file_operations rose_info_fops = {
.owner = THIS_MODULE,
.open = rose_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static const struct net_proto_family rose_family_ops = {
.family = PF_ROSE,
.create = rose_create,
.owner = THIS_MODULE,
};
static const struct proto_ops rose_proto_ops = {
.family = PF_ROSE,
.owner = THIS_MODULE,
.release = rose_release,
.bind = rose_bind,
.connect = rose_connect,
.socketpair = sock_no_socketpair,
.accept = rose_accept,
.getname = rose_getname,
.poll = datagram_poll,
.ioctl = rose_ioctl,
.listen = rose_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rose_setsockopt,
.getsockopt = rose_getsockopt,
.sendmsg = rose_sendmsg,
.recvmsg = rose_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block rose_dev_notifier = {
.notifier_call = rose_device_event,
};
static struct net_device **dev_rose;
static struct ax25_protocol rose_pid = {
.pid = AX25_P_ROSE,
.func = rose_route_frame
};
static struct ax25_linkfail rose_linkfail_notifier = {
.func = rose_link_failed
};
static int __init rose_proto_init(void)
{
int i;
int rc;
if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
rc = -EINVAL;
goto out;
}
rc = proto_register(&rose_proto, 0);
if (rc != 0)
goto out;
rose_callsign = null_ax25_address;
dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];
sprintf(name, "rose%d", i);
dev = alloc_netdev(0, name, rose_setup);
if (!dev) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
rc = -ENOMEM;
goto fail;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "ROSE: netdevice registration failed\n");
free_netdev(dev);
goto fail;
}
rose_set_lockdep_key(dev);
dev_rose[i] = dev;
}
sock_register(&rose_family_ops);
register_netdevice_notifier(&rose_dev_notifier);
ax25_register_pid(&rose_pid);
ax25_linkfail_register(&rose_linkfail_notifier);
#ifdef CONFIG_SYSCTL
rose_register_sysctl();
#endif
rose_loopback_init();
rose_add_loopback_neigh();
proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_rose[i]);
free_netdev(dev_rose[i]);
}
kfree(dev_rose);
out_proto_unregister:
proto_unregister(&rose_proto);
goto out;
}
module_init(rose_proto_init);
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);
static void __exit rose_exit(void)
{
int i;
proc_net_remove(&init_net, "rose");
proc_net_remove(&init_net, "rose_neigh");
proc_net_remove(&init_net, "rose_nodes");
proc_net_remove(&init_net, "rose_routes");
rose_loopback_clear();
rose_rt_free();
ax25_protocol_release(AX25_P_ROSE);
ax25_linkfail_release(&rose_linkfail_notifier);
if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
ax25_listen_release(&rose_callsign, NULL);
#ifdef CONFIG_SYSCTL
rose_unregister_sysctl();
#endif
unregister_netdevice_notifier(&rose_dev_notifier);
sock_unregister(PF_ROSE);
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev = dev_rose[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_rose);
proto_unregister(&rose_proto);
}
module_exit(rose_exit);
|
36,346,335,786,319 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <net/ax25.h>
#include <linux/skbuff.h>
#include <net/rose.h>
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
void rose_loopback_init(void)
{
skb_queue_head_init(&loopback_queue);
init_timer(&loopback_timer);
}
static int rose_loopback_running(void)
{
return timer_pending(&loopback_timer);
}
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
struct sk_buff *skbn;
skbn = skb_clone(skb, GFP_ATOMIC);
kfree_skb(skb);
if (skbn != NULL) {
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
}
return 1;
}
static void rose_loopback_timer(unsigned long);
static void rose_set_loopback_timer(void)
{
del_timer(&loopback_timer);
loopback_timer.data = 0;
loopback_timer.function = &rose_loopback_timer;
loopback_timer.expires = jiffies + 10;
add_timer(&loopback_timer);
}
static void rose_loopback_timer(unsigned long param)
{
struct sk_buff *skb;
struct net_device *dev;
rose_address *dest;
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
dest = (rose_address *)(skb->data + 4);
lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
sk = rose_find_socket(lci_o, rose_loopback_neigh);
if (sk) {
if (rose_process_rx_frame(sk, skb) == 0)
kfree_skb(skb);
continue;
}
if (frametype == ROSE_CALL_REQUEST) {
if ((dev = rose_dev_get(dest)) != NULL) {
if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
kfree_skb(skb);
} else {
kfree_skb(skb);
}
} else {
kfree_skb(skb);
}
}
}
void __exit rose_loopback_clear(void)
{
struct sk_buff *skb;
del_timer(&loopback_timer);
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb->sk = NULL;
kfree_skb(skb);
}
}
| /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <net/ax25.h>
#include <linux/skbuff.h>
#include <net/rose.h>
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
void rose_loopback_init(void)
{
skb_queue_head_init(&loopback_queue);
init_timer(&loopback_timer);
}
static int rose_loopback_running(void)
{
return timer_pending(&loopback_timer);
}
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
struct sk_buff *skbn;
skbn = skb_clone(skb, GFP_ATOMIC);
kfree_skb(skb);
if (skbn != NULL) {
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
}
return 1;
}
static void rose_loopback_timer(unsigned long);
static void rose_set_loopback_timer(void)
{
del_timer(&loopback_timer);
loopback_timer.data = 0;
loopback_timer.function = &rose_loopback_timer;
loopback_timer.expires = jiffies + 10;
add_timer(&loopback_timer);
}
static void rose_loopback_timer(unsigned long param)
{
struct sk_buff *skb;
struct net_device *dev;
rose_address *dest;
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
}
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
if (frametype == ROSE_CALL_REQUEST &&
(skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
ROSE_CALL_REQ_ADDR_LEN_VAL)) {
kfree_skb(skb);
continue;
}
dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
sk = rose_find_socket(lci_o, rose_loopback_neigh);
if (sk) {
if (rose_process_rx_frame(sk, skb) == 0)
kfree_skb(skb);
continue;
}
if (frametype == ROSE_CALL_REQUEST) {
if ((dev = rose_dev_get(dest)) != NULL) {
if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
kfree_skb(skb);
} else {
kfree_skb(skb);
}
} else {
kfree_skb(skb);
}
}
}
void __exit rose_loopback_clear(void)
{
struct sk_buff *skb;
del_timer(&loopback_timer);
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
skb->sk = NULL;
kfree_skb(skb);
}
}
|
209,673,985,093,604 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
static unsigned int rose_neigh_no = 1;
static struct rose_node *rose_node_list;
static DEFINE_SPINLOCK(rose_node_list_lock);
static struct rose_neigh *rose_neigh_list;
static DEFINE_SPINLOCK(rose_neigh_list_lock);
static struct rose_route *rose_route_list;
static DEFINE_SPINLOCK(rose_route_list_lock);
struct rose_neigh *rose_loopback_neigh;
/*
* Add a new route to a node, and in the process add the node and the
* neighbour if it is new.
*/
static int __must_check rose_add_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
struct rose_neigh *rose_neigh;
int i, res = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node != NULL && rose_node->loopback) {
res = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
if (rose_neigh == NULL) {
res = -ENOMEM;
goto out;
}
rose_neigh->callsign = rose_route->neighbour;
rose_neigh->digipeat = NULL;
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
skb_queue_head_init(&rose_neigh->queue);
init_timer(&rose_neigh->ftimer);
init_timer(&rose_neigh->t0timer);
if (rose_route->ndigis != 0) {
rose_neigh->digipeat =
kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
if (rose_neigh->digipeat == NULL) {
kfree(rose_neigh);
res = -ENOMEM;
goto out;
}
rose_neigh->digipeat->ndigi = rose_route->ndigis;
rose_neigh->digipeat->lastrepeat = -1;
for (i = 0; i < rose_route->ndigis; i++) {
rose_neigh->digipeat->calls[i] =
rose_route->digipeaters[i];
rose_neigh->digipeat->repeated[i] = 0;
}
}
rose_neigh->next = rose_neigh_list;
rose_neigh_list = rose_neigh;
}
/*
* This is a new node to be inserted into the list. Find where it needs
* to go, and insert it there. We keep the list in descending order of
* mask size so that, when we search it later, the first match is the
* best match.
*/
if (rose_node == NULL) {
rose_tmpn = rose_node_list;
rose_tmpp = NULL;
while (rose_tmpn != NULL) {
if (rose_tmpn->mask > rose_route->mask) {
rose_tmpp = rose_tmpn;
rose_tmpn = rose_tmpn->next;
} else {
break;
}
}
/* create new node */
rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
if (rose_node == NULL) {
res = -ENOMEM;
goto out;
}
rose_node->address = rose_route->address;
rose_node->mask = rose_route->mask;
rose_node->count = 1;
rose_node->loopback = 0;
rose_node->neighbour[0] = rose_neigh;
if (rose_tmpn == NULL) {
if (rose_tmpp == NULL) { /* Empty list */
rose_node_list = rose_node;
rose_node->next = NULL;
} else {
rose_tmpp->next = rose_node;
rose_node->next = NULL;
}
} else {
if (rose_tmpp == NULL) { /* 1st node */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
} else {
rose_tmpp->next = rose_node;
rose_node->next = rose_tmpn;
}
}
rose_neigh->count++;
goto out;
}
/* We have space, slot it in */
if (rose_node->count < 3) {
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
}
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return res;
}
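/*
 * Added illustration of the ordering kept above: a mask counts matched
 * leading digits, so the descending sort gives longest-prefix matching
 * for free. With these entries in the list,
 *
 *	3505887211  mask 10  (a single station)
 *	350588      mask  6  (a node)
 *	3505        mask  4  (the whole network)
 *
 * a lookup walks the mask-10 entry first, so the most specific match
 * always wins.
 */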
/*
* Caller is holding rose_node_list_lock.
*/
static void rose_remove_node(struct rose_node *rose_node)
{
struct rose_node *s;
if ((s = rose_node_list) == rose_node) {
rose_node_list = rose_node->next;
kfree(rose_node);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_node) {
s->next = rose_node->next;
kfree(rose_node);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_neigh_list_lock.
*/
static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
struct rose_neigh *s;
rose_stop_ftimer(rose_neigh);
rose_stop_t0timer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_route_list_lock.
*/
static void rose_remove_route(struct rose_route *rose_route)
{
struct rose_route *s;
if (rose_route->neigh1 != NULL)
rose_route->neigh1->use--;
if (rose_route->neigh2 != NULL)
rose_route->neigh2->use--;
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
kfree(rose_route);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_route) {
s->next = rose_route->next;
kfree(rose_route);
return;
}
s = s->next;
}
}
/*
* "Delete" a node. Strictly speaking remove a route to a node. The node
* is only deleted if no routes are left to it.
*/
static int rose_del_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node;
struct rose_neigh *rose_neigh;
int i, err = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node == NULL || rose_node->loopback) {
err = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
err = -EINVAL;
goto out;
}
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
if (rose_neigh->count == 0 && rose_neigh->use == 0)
rose_remove_neigh(rose_neigh);
rose_node->count--;
if (rose_node->count == 0) {
rose_remove_node(rose_node);
} else {
switch (i) {
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
/* fall through */
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
/* fall through */
case 2:
break;
}
}
goto out;
}
}
err = -EINVAL;
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Add the loopback neighbour.
*/
void rose_add_loopback_neigh(void)
{
struct rose_neigh *sn;
rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
if (!rose_loopback_neigh)
return;
sn = rose_loopback_neigh;
sn->callsign = null_ax25_address;
sn->digipeat = NULL;
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
skb_queue_head_init(&sn->queue);
init_timer(&sn->ftimer);
init_timer(&sn->t0timer);
spin_lock_bh(&rose_neigh_list_lock);
sn->next = rose_neigh_list;
rose_neigh_list = sn;
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* Add a loopback node.
*/
int rose_add_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
int err = 0;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node != NULL)
goto out;
if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) {
err = -ENOMEM;
goto out;
}
rose_node->address = *address;
rose_node->mask = 10;
rose_node->count = 1;
rose_node->loopback = 1;
rose_node->neighbour[0] = rose_loopback_neigh;
/* Insert at the head of list. Address is always mask=10 */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
rose_loopback_neigh->count++;
out:
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Delete a loopback node.
*/
void rose_del_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node == NULL)
goto out;
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
out:
spin_unlock_bh(&rose_node_list_lock);
}
/*
* A device has been removed. Remove its routes and neighbours.
*/
void rose_rt_device_down(struct net_device *dev)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->dev != dev)
continue;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
for (i = 0; i < t->count; i++) {
if (t->neighbour[i] != s)
continue;
t->count--;
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
/* fall through */
case 1:
t->neighbour[1] = t->neighbour[2];
/* fall through */
case 2:
break;
}
}
if (t->count <= 0)
rose_remove_node(t);
}
rose_remove_neigh(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
}
#if 0 /* Currently unused */
/*
* A device has been removed. Remove its links.
*/
void rose_route_device_down(struct net_device *dev)
{
struct rose_route *s, *rose_route;
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
s = rose_route;
rose_route = rose_route->next;
if (s->neigh1->dev == dev || s->neigh2->dev == dev)
rose_remove_route(s);
}
spin_unlock_bh(&rose_route_list_lock);
}
#endif
/*
* Clear all nodes and neighbours out, except for neighbours with
* active connections going through them.
* Do not clear the loopback neighbour or loopback nodes.
*/
static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
if (!t->loopback)
rose_remove_node(t);
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->use == 0 && !s->loopback) {
s->count = 0;
rose_remove_neigh(s);
}
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return 0;
}
/*
* Check that the device given is a valid AX.25 interface that is "up".
* Called with RTNL held.
*/
static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
return NULL;
}
/*
* Find the first active ROSE device, usually "rose0".
*/
struct net_device *rose_dev_first(void)
{
struct net_device *dev, *first = NULL;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
rcu_read_unlock();
return first;
}
/*
* Find the ROSE device for the given address.
*/
struct net_device *rose_dev_get(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
}
dev = NULL;
out:
rcu_read_unlock();
return dev;
}
static int rose_dev_exists(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
out:
rcu_read_unlock();
return dev != NULL;
}
struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh)
{
struct rose_route *rose_route;
for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next)
if ((rose_route->neigh1 == neigh && rose_route->lci1 == lci) ||
(rose_route->neigh2 == neigh && rose_route->lci2 == lci))
return rose_route;
return NULL;
}
/*
* Find a neighbour or a route given a ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
if (!route_frame)
spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
goto out;
}
}
}
}
if (!route_frame) { /* connect request */
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
failed = 0;
goto out;
}
failed = 1;
}
}
}
}
if (failed) {
*cause = ROSE_OUT_OF_ORDER;
*diagnostic = 0;
} else {
*cause = ROSE_NOT_OBTAINABLE;
*diagnostic = 0;
}
out:
if (!route_frame)
spin_unlock_bh(&rose_node_list_lock);
return res;
}
/*
* Handle the ioctls that control the routing functions.
*/
int rose_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct rose_route_struct rose_route;
struct net_device *dev;
int err;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
return err;
case SIOCRSCLRRT:
return rose_clear_routes();
default:
return -EINVAL;
}
return 0;
}
static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
struct rose_route *rose_route, *s;
rose_neigh->restarted = 0;
rose_stop_t0timer(rose_neigh);
rose_start_ftimer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) ||
(rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) ||
(rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) {
s = rose_route->next;
rose_remove_route(rose_route);
rose_route = s;
continue;
}
if (rose_route->neigh1 == rose_neigh) {
rose_route->neigh1->use--;
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
rose_route->neigh2->use--;
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
rose_route = rose_route->next;
}
spin_unlock_bh(&rose_route_list_lock);
}
/*
* A level 2 link has timed out, so it appears to be a poor link;
* don't use that neighbour until it is reset. Blow away all through
* routes and connections using this route.
*/
void rose_link_failed(ax25_cb *ax25, int reason)
{
struct rose_neigh *rose_neigh;
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (rose_neigh->ax25 == ax25)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* A device has been "downed" remove its link status. Blow away all
* through routes and connections that use this device.
*/
void rose_link_device_down(struct net_device *dev)
{
struct rose_neigh *rose_neigh;
for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) {
if (rose_neigh->dev == dev) {
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
}
}
/*
* Route a frame to an appropriate AX.25 connection.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
struct rose_neigh *rose_neigh, *new_neigh;
struct rose_route *rose_route;
struct rose_facilities_struct facilities;
rose_address *src_addr, *dest_addr;
struct sock *sk;
unsigned short frametype;
unsigned int lci, new_lci;
unsigned char cause, diagnostic;
struct net_device *dev;
int len, res = 0;
char buf[11];
#if 0
if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
return res;
#endif
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
src_addr = (rose_address *)(skb->data + 9);
dest_addr = (rose_address *)(skb->data + 4);
spin_lock_bh(&rose_neigh_list_lock);
spin_lock_bh(&rose_route_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&ax25->dest_addr, &rose_neigh->callsign) == 0 &&
ax25->ax25_dev->dev == rose_neigh->dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
printk("rose_route : unknown neighbour or device %s\n",
ax2asc(buf, &ax25->dest_addr));
goto out;
}
/*
* Obviously the link is working, halt the ftimer.
*/
rose_stop_ftimer(rose_neigh);
/*
* LCI of zero is always for us, and it's always a restart
* frame.
*/
if (lci == 0) {
rose_link_rx_restart(skb, rose_neigh, frametype);
goto out;
}
/*
* Find an existing socket.
*/
if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
if (frametype == ROSE_CALL_REQUEST) {
struct rose_sock *rose = rose_sk(sk);
/* Remove an existing unused socket */
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
rose->neighbour->use--;
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
} else {
skb_reset_transport_header(skb);
res = rose_process_rx_frame(sk, skb);
goto out;
}
}
/*
* Is it a Call Request and is it for us?
*/
if (frametype == ROSE_CALL_REQUEST)
if ((dev = rose_dev_get(dest_addr)) != NULL) {
res = rose_rx_call_request(skb, dev, rose_neigh, lci);
dev_put(dev);
goto out;
}
if (!sysctl_rose_routing_control) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 0);
goto out;
}
/*
* Route it to the next in line if we have an entry for it.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->lci1 == lci &&
rose_route->neigh1 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh2 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
if (rose_route->lci2 == lci &&
rose_route->neigh2 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh1 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci1 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh1);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
rose_route = rose_route->next;
}
/*
* We know that:
* 1. The frame isn't for us,
* 2. It isn't "owned" by any existing route.
*/
if (frametype != ROSE_CALL_REQUEST) { /* XXX */
res = 0;
goto out;
}
len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
goto out;
}
/*
* Check for routing loops.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->rand == facilities.rand &&
rosecmp(src_addr, &rose_route->src_addr) == 0 &&
ax25cmp(&facilities.dest_call, &rose_route->src_call) == 0 &&
ax25cmp(&facilities.source_call, &rose_route->dest_call) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 120);
goto out;
}
rose_route = rose_route->next;
}
if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
goto out;
}
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
goto out;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
goto out;
}
rose_route->lci1 = lci;
rose_route->src_addr = *src_addr;
rose_route->dest_addr = *dest_addr;
rose_route->src_call = facilities.dest_call;
rose_route->dest_call = facilities.source_call;
rose_route->rand = facilities.rand;
rose_route->neigh1 = rose_neigh;
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
rose_route->neigh1->use++;
rose_route->neigh2->use++;
rose_route->next = rose_route_list;
rose_route_list = rose_route;
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
return res;
}
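/*
 * Added note on the offsets used above. A Call Request arrives as:
 *
 *	data[0..2]  GFI/LCI/frametype header
 *	data[3]     address-length octet, one BCD digit count per nibble,
 *	            from which "len" (the packed address bytes) is derived
 *	data[4..8]  destination address, read at offset 4
 *	data[9..13] source address, read at offset 9
 *
 * The facilities field follows the addresses, which is why
 * rose_parse_facilities() is pointed at data + len + 4.
 */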
#ifdef CONFIG_PROC_FS
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_node_list_lock)
{
struct rose_node *rose_node;
int i = 1;
spin_lock_bh(&rose_node_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_node = rose_node_list; rose_node && i < *pos;
rose_node = rose_node->next, ++i);
return (i == *pos) ? rose_node : NULL;
}
static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_node_list
: ((struct rose_node *)v)->next;
}
static void rose_node_stop(struct seq_file *seq, void *v)
__releases(rose_node_list_lock)
{
spin_unlock_bh(&rose_node_list_lock);
}
static int rose_node_show(struct seq_file *seq, void *v)
{
char rsbuf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "address mask n neigh neigh neigh\n");
else {
const struct rose_node *rose_node = v;
/* if (rose_node->loopback) {
seq_printf(seq, "%-10s %04d 1 loopback\n",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask);
} else { */
seq_printf(seq, "%-10s %04d %d",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask,
rose_node->count);
for (i = 0; i < rose_node->count; i++)
seq_printf(seq, " %05d",
rose_node->neighbour[i]->number);
seq_puts(seq, "\n");
/* } */
}
return 0;
}
static const struct seq_operations rose_node_seqops = {
.start = rose_node_start,
.next = rose_node_next,
.stop = rose_node_stop,
.show = rose_node_show,
};
static int rose_nodes_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_node_seqops);
}
const struct file_operations rose_nodes_fops = {
.owner = THIS_MODULE,
.open = rose_nodes_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_neigh_list_lock)
{
struct rose_neigh *rose_neigh;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
rose_neigh = rose_neigh->next, ++i);
return (i == *pos) ? rose_neigh : NULL;
}
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_neigh_list
: ((struct rose_neigh *)v)->next;
}
static void rose_neigh_stop(struct seq_file *seq, void *v)
__releases(rose_neigh_list_lock)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
static int rose_neigh_show(struct seq_file *seq, void *v)
{
char buf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"addr callsign dev count use mode restart t0 tf digipeaters\n");
else {
struct rose_neigh *rose_neigh = v;
/* if (!rose_neigh->loopback) { */
seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
}
seq_puts(seq, "\n");
}
return 0;
}
static const struct seq_operations rose_neigh_seqops = {
.start = rose_neigh_start,
.next = rose_neigh_next,
.stop = rose_neigh_stop,
.show = rose_neigh_show,
};
static int rose_neigh_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_neigh_seqops);
}
const struct file_operations rose_neigh_fops = {
.owner = THIS_MODULE,
.open = rose_neigh_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_route_list_lock)
{
struct rose_route *rose_route;
int i = 1;
spin_lock_bh(&rose_route_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_route = rose_route_list; rose_route && i < *pos;
rose_route = rose_route->next, ++i);
return (i == *pos) ? rose_route : NULL;
}
static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_route_list
: ((struct rose_route *)v)->next;
}
static void rose_route_stop(struct seq_file *seq, void *v)
__releases(rose_route_list_lock)
{
spin_unlock_bh(&rose_route_list_lock);
}
static int rose_route_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"lci address callsign neigh <-> lci address callsign neigh\n");
else {
struct rose_route *rose_route = v;
if (rose_route->neigh1)
seq_printf(seq,
"%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(rsbuf, &rose_route->src_addr),
ax2asc(buf, &rose_route->src_call),
rose_route->neigh1->number);
else
seq_puts(seq,
"000 * * 00000 ");
if (rose_route->neigh2)
seq_printf(seq,
"%3.3X %-10s %-9s %05d\n",
rose_route->lci2,
rose2asc(rsbuf, &rose_route->dest_addr),
ax2asc(buf, &rose_route->dest_call),
rose_route->neigh2->number);
else
seq_puts(seq,
"000 * * 00000\n");
}
return 0;
}
static const struct seq_operations rose_route_seqops = {
.start = rose_route_start,
.next = rose_route_next,
.stop = rose_route_stop,
.show = rose_route_show,
};
static int rose_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_route_seqops);
}
const struct file_operations rose_routes_fops = {
.owner = THIS_MODULE,
.open = rose_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
/*
* Release all memory associated with ROSE routing structures.
*/
void __exit rose_rt_free(void)
{
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
rose_remove_node(t);
}
while (rose_route != NULL) {
u = rose_route;
rose_route = rose_route->next;
rose_remove_route(u);
}
}
| /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/netfilter.h>
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
static unsigned int rose_neigh_no = 1;
static struct rose_node *rose_node_list;
static DEFINE_SPINLOCK(rose_node_list_lock);
static struct rose_neigh *rose_neigh_list;
static DEFINE_SPINLOCK(rose_neigh_list_lock);
static struct rose_route *rose_route_list;
static DEFINE_SPINLOCK(rose_route_list_lock);
struct rose_neigh *rose_loopback_neigh;
/*
* Add a new route to a node, and in the process add the node and the
* neighbour if it is new.
*/
static int __must_check rose_add_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
struct rose_neigh *rose_neigh;
int i, res = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node != NULL && rose_node->loopback) {
res = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
if (rose_neigh == NULL) {
res = -ENOMEM;
goto out;
}
rose_neigh->callsign = rose_route->neighbour;
rose_neigh->digipeat = NULL;
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
skb_queue_head_init(&rose_neigh->queue);
init_timer(&rose_neigh->ftimer);
init_timer(&rose_neigh->t0timer);
if (rose_route->ndigis != 0) {
rose_neigh->digipeat =
kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
if (rose_neigh->digipeat == NULL) {
kfree(rose_neigh);
res = -ENOMEM;
goto out;
}
rose_neigh->digipeat->ndigi = rose_route->ndigis;
rose_neigh->digipeat->lastrepeat = -1;
for (i = 0; i < rose_route->ndigis; i++) {
rose_neigh->digipeat->calls[i] =
rose_route->digipeaters[i];
rose_neigh->digipeat->repeated[i] = 0;
}
}
rose_neigh->next = rose_neigh_list;
rose_neigh_list = rose_neigh;
}
/*
* This is a new node to be inserted into the list. Find where it needs
* to go and insert it there. Keep the list ordered by descending mask
* size so that, when we search it later, the first match found is the
* best match.
*/
if (rose_node == NULL) {
rose_tmpn = rose_node_list;
rose_tmpp = NULL;
while (rose_tmpn != NULL) {
if (rose_tmpn->mask > rose_route->mask) {
rose_tmpp = rose_tmpn;
rose_tmpn = rose_tmpn->next;
} else {
break;
}
}
/* create new node */
rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
if (rose_node == NULL) {
res = -ENOMEM;
goto out;
}
rose_node->address = rose_route->address;
rose_node->mask = rose_route->mask;
rose_node->count = 1;
rose_node->loopback = 0;
rose_node->neighbour[0] = rose_neigh;
if (rose_tmpn == NULL) {
if (rose_tmpp == NULL) { /* Empty list */
rose_node_list = rose_node;
rose_node->next = NULL;
} else {
rose_tmpp->next = rose_node;
rose_node->next = NULL;
}
} else {
if (rose_tmpp == NULL) { /* 1st node */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
} else {
rose_tmpp->next = rose_node;
rose_node->next = rose_tmpn;
}
}
rose_neigh->count++;
goto out;
}
/* We have space, slot it in */
if (rose_node->count < 3) {
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
}
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return res;
}
/*
* Caller is holding rose_node_list_lock.
*/
static void rose_remove_node(struct rose_node *rose_node)
{
struct rose_node *s;
if ((s = rose_node_list) == rose_node) {
rose_node_list = rose_node->next;
kfree(rose_node);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_node) {
s->next = rose_node->next;
kfree(rose_node);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_neigh_list_lock.
*/
static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
struct rose_neigh *s;
rose_stop_ftimer(rose_neigh);
rose_stop_t0timer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
if (rose_neigh->ax25)
ax25_cb_put(rose_neigh->ax25);
kfree(rose_neigh->digipeat);
kfree(rose_neigh);
return;
}
s = s->next;
}
}
/*
* Caller is holding rose_route_list_lock.
*/
static void rose_remove_route(struct rose_route *rose_route)
{
struct rose_route *s;
if (rose_route->neigh1 != NULL)
rose_route->neigh1->use--;
if (rose_route->neigh2 != NULL)
rose_route->neigh2->use--;
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
kfree(rose_route);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_route) {
s->next = rose_route->next;
kfree(rose_route);
return;
}
s = s->next;
}
}
/*
* "Delete" a node. Strictly speaking remove a route to a node. The node
* is only deleted if no routes are left to it.
*/
static int rose_del_node(struct rose_route_struct *rose_route,
struct net_device *dev)
{
struct rose_node *rose_node;
struct rose_neigh *rose_neigh;
int i, err = 0;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == rose_route->mask) &&
(rosecmpm(&rose_route->address, &rose_node->address,
rose_route->mask) == 0))
break;
rose_node = rose_node->next;
}
if (rose_node == NULL || rose_node->loopback) {
err = -EINVAL;
goto out;
}
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&rose_route->neighbour,
&rose_neigh->callsign) == 0 &&
rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
err = -EINVAL;
goto out;
}
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
if (rose_neigh->count == 0 && rose_neigh->use == 0)
rose_remove_neigh(rose_neigh);
rose_node->count--;
if (rose_node->count == 0) {
rose_remove_node(rose_node);
} else {
switch (i) {
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
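/* fall through - shift the remaining neighbours down */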
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
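/* fall through */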
case 2:
break;
}
}
goto out;
}
}
err = -EINVAL;
out:
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Add the loopback neighbour.
*/
void rose_add_loopback_neigh(void)
{
struct rose_neigh *sn;
rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
if (!rose_loopback_neigh)
return;
sn = rose_loopback_neigh;
sn->callsign = null_ax25_address;
sn->digipeat = NULL;
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
skb_queue_head_init(&sn->queue);
init_timer(&sn->ftimer);
init_timer(&sn->t0timer);
spin_lock_bh(&rose_neigh_list_lock);
sn->next = rose_neigh_list;
rose_neigh_list = sn;
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* Add a loopback node.
*/
int rose_add_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
int err = 0;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node != NULL)
goto out;
if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) {
err = -ENOMEM;
goto out;
}
rose_node->address = *address;
rose_node->mask = 10;
rose_node->count = 1;
rose_node->loopback = 1;
rose_node->neighbour[0] = rose_loopback_neigh;
/* Insert at the head of list. Address is always mask=10 */
rose_node->next = rose_node_list;
rose_node_list = rose_node;
rose_loopback_neigh->count++;
out:
spin_unlock_bh(&rose_node_list_lock);
return err;
}
/*
* Delete a loopback node.
*/
void rose_del_loopback_node(rose_address *address)
{
struct rose_node *rose_node;
spin_lock_bh(&rose_node_list_lock);
rose_node = rose_node_list;
while (rose_node != NULL) {
if ((rose_node->mask == 10) &&
(rosecmpm(address, &rose_node->address, 10) == 0) &&
rose_node->loopback)
break;
rose_node = rose_node->next;
}
if (rose_node == NULL)
goto out;
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
out:
spin_unlock_bh(&rose_node_list_lock);
}
/*
* A device has been removed. Remove its routes and neighbours.
*/
void rose_rt_device_down(struct net_device *dev)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->dev != dev)
continue;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
for (i = 0; i < t->count; i++) {
if (t->neighbour[i] != s)
continue;
t->count--;
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
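/* fall through - shift the remaining neighbours down */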
case 1:
t->neighbour[1] = t->neighbour[2];
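/* fall through */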
case 2:
break;
}
}
if (t->count <= 0)
rose_remove_node(t);
}
rose_remove_neigh(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
}
#if 0 /* Currently unused */
/*
* A device has been removed. Remove its links.
*/
void rose_route_device_down(struct net_device *dev)
{
struct rose_route *s, *rose_route;
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
s = rose_route;
rose_route = rose_route->next;
if (s->neigh1->dev == dev || s->neigh2->dev == dev)
rose_remove_route(s);
}
spin_unlock_bh(&rose_route_list_lock);
}
#endif
/*
* Clear all nodes and neighbours out, except for neighbours with
* active connections going through them.
* Do not clear the loopback neighbour or loopback nodes.
*/
static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
rose_node = rose_node_list;
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
if (!t->loopback)
rose_remove_node(t);
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
if (s->use == 0 && !s->loopback) {
s->count = 0;
rose_remove_neigh(s);
}
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
return 0;
}
/*
* Check that the device given is a valid AX.25 interface that is "up".
* Called with the RTNL held.
*/
static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
return NULL;
}
/*
* Find the first active ROSE device, usually "rose0".
*/
struct net_device *rose_dev_first(void)
{
struct net_device *dev, *first = NULL;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
rcu_read_unlock();
return first;
}
/*
* Find the ROSE device for the given address.
*/
struct net_device *rose_dev_get(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
}
dev = NULL;
out:
rcu_read_unlock();
return dev;
}
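/*
 * Check whether one of our own "up" ROSE devices owns the given address.
 */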
static int rose_dev_exists(rose_address *addr)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
out:
rcu_read_unlock();
return dev != NULL;
}
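/*
 * Find the through route, if any, that uses the given LCI on the given
 * neighbour.
 */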
struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh)
{
struct rose_route *rose_route;
for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next)
if ((rose_route->neigh1 == neigh && rose_route->lci1 == lci) ||
(rose_route->neigh2 == neigh && rose_route->lci2 == lci))
return rose_route;
return NULL;
}
/*
* Find a neighbour or a route given a ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
if (!route_frame) spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
goto out;
}
}
}
}
if (!route_frame) { /* connect request */
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
failed = 0;
goto out;
}
failed = 1;
}
}
}
}
if (failed) {
*cause = ROSE_OUT_OF_ORDER;
*diagnostic = 0;
} else {
*cause = ROSE_NOT_OBTAINABLE;
*diagnostic = 0;
}
out:
if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
/*
* Handle the ioctls that control the routing functions.
*/
int rose_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct rose_route_struct rose_route;
struct net_device *dev;
int err;
switch (cmd) {
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
return err;
case SIOCRSCLRRT:
return rose_clear_routes();
default:
return -EINVAL;
}
return 0;
}
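/*
 * Mark a neighbour as down and tear down every through route that uses
 * it, clearing the call towards the other end where one still exists.
 */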
static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
struct rose_route *rose_route, *s;
rose_neigh->restarted = 0;
rose_stop_t0timer(rose_neigh);
rose_start_ftimer(rose_neigh);
skb_queue_purge(&rose_neigh->queue);
spin_lock_bh(&rose_route_list_lock);
rose_route = rose_route_list;
while (rose_route != NULL) {
if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) ||
(rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) ||
(rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) {
s = rose_route->next;
rose_remove_route(rose_route);
rose_route = s;
continue;
}
if (rose_route->neigh1 == rose_neigh) {
rose_route->neigh1->use--;
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
rose_route->neigh2->use--;
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
rose_route = rose_route->next;
}
spin_unlock_bh(&rose_route_list_lock);
}
/*
* A level 2 link has timed out, so it appears to be a poor link; don't
* use that neighbour until it is reset. Blow away all through routes
* and connections that use it.
*/
void rose_link_failed(ax25_cb *ax25, int reason)
{
struct rose_neigh *rose_neigh;
spin_lock_bh(&rose_neigh_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (rose_neigh->ax25 == ax25)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh != NULL) {
rose_neigh->ax25 = NULL;
ax25_cb_put(ax25);
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
spin_unlock_bh(&rose_neigh_list_lock);
}
/*
* A device has been "downed": remove its link status. Blow away all
* through routes and connections that use this device.
*/
void rose_link_device_down(struct net_device *dev)
{
struct rose_neigh *rose_neigh;
for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) {
if (rose_neigh->dev == dev) {
rose_del_route_by_neigh(rose_neigh);
rose_kill_by_neigh(rose_neigh);
}
}
}
/*
* Route a frame to an appropriate AX.25 connection.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
struct rose_neigh *rose_neigh, *new_neigh;
struct rose_route *rose_route;
struct rose_facilities_struct facilities;
rose_address *src_addr, *dest_addr;
struct sock *sk;
unsigned short frametype;
unsigned int lci, new_lci;
unsigned char cause, diagnostic;
struct net_device *dev;
int res = 0;
char buf[11];
#if 0
if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
return res;
#endif
if (skb->len < ROSE_MIN_LEN)
return res;
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&
(skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
ROSE_CALL_REQ_ADDR_LEN_VAL))
return res;
src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
spin_lock_bh(&rose_neigh_list_lock);
spin_lock_bh(&rose_route_list_lock);
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
if (ax25cmp(&ax25->dest_addr, &rose_neigh->callsign) == 0 &&
ax25->ax25_dev->dev == rose_neigh->dev)
break;
rose_neigh = rose_neigh->next;
}
if (rose_neigh == NULL) {
printk("rose_route : unknown neighbour or device %s\n",
ax2asc(buf, &ax25->dest_addr));
goto out;
}
/*
* Obviously the link is working, halt the ftimer.
*/
rose_stop_ftimer(rose_neigh);
/*
* LCI of zero is always for us, and it's always a restart
* frame.
*/
if (lci == 0) {
rose_link_rx_restart(skb, rose_neigh, frametype);
goto out;
}
/*
* Find an existing socket.
*/
if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
if (frametype == ROSE_CALL_REQUEST) {
struct rose_sock *rose = rose_sk(sk);
/* Remove an existing unused socket */
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
rose->neighbour->use--;
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
sk->sk_state = TCP_CLOSE;
sk->sk_err = 0;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
else {
skb_reset_transport_header(skb);
res = rose_process_rx_frame(sk, skb);
goto out;
}
}
/*
* Is it a Call Request and is it for us?
*/
if (frametype == ROSE_CALL_REQUEST)
if ((dev = rose_dev_get(dest_addr)) != NULL) {
res = rose_rx_call_request(skb, dev, rose_neigh, lci);
dev_put(dev);
goto out;
}
if (!sysctl_rose_routing_control) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 0);
goto out;
}
/*
* Route it to the next in line if we have an entry for it.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->lci1 == lci &&
rose_route->neigh1 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh2 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
if (rose_route->lci2 == lci &&
rose_route->neigh2 == rose_neigh) {
if (frametype == ROSE_CALL_REQUEST) {
/* F6FBB - Remove an existing unused route */
rose_remove_route(rose_route);
break;
} else if (rose_route->neigh1 != NULL) {
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci1 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh1);
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
res = 1;
goto out;
} else {
if (frametype == ROSE_CLEAR_CONFIRMATION)
rose_remove_route(rose_route);
goto out;
}
}
rose_route = rose_route->next;
}
/*
* We know that:
* 1. The frame isn't for us,
* 2. It isn't "owned" by any existing route.
*/
if (frametype != ROSE_CALL_REQUEST) { /* XXX */
res = 0;
goto out;
}
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
&facilities)) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
goto out;
}
/*
* Check for routing loops.
*/
rose_route = rose_route_list;
while (rose_route != NULL) {
if (rose_route->rand == facilities.rand &&
rosecmp(src_addr, &rose_route->src_addr) == 0 &&
ax25cmp(&facilities.dest_call, &rose_route->src_call) == 0 &&
ax25cmp(&facilities.source_call, &rose_route->dest_call) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 120);
goto out;
}
rose_route = rose_route->next;
}
if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
goto out;
}
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
goto out;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
goto out;
}
rose_route->lci1 = lci;
rose_route->src_addr = *src_addr;
rose_route->dest_addr = *dest_addr;
rose_route->src_call = facilities.dest_call;
rose_route->dest_call = facilities.source_call;
rose_route->rand = facilities.rand;
rose_route->neigh1 = rose_neigh;
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
rose_route->neigh1->use++;
rose_route->neigh2->use++;
rose_route->next = rose_route_list;
rose_route_list = rose_route;
skb->data[0] &= 0xF0;
skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
skb->data[1] = (rose_route->lci2 >> 0) & 0xFF;
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
return res;
}
#ifdef CONFIG_PROC_FS
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_node_list_lock)
{
struct rose_node *rose_node;
int i = 1;
spin_lock_bh(&rose_node_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_node = rose_node_list; rose_node && i < *pos;
rose_node = rose_node->next, ++i);
return (i == *pos) ? rose_node : NULL;
}
static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_node_list
: ((struct rose_node *)v)->next;
}
static void rose_node_stop(struct seq_file *seq, void *v)
__releases(rose_node_list_lock)
{
spin_unlock_bh(&rose_node_list_lock);
}
static int rose_node_show(struct seq_file *seq, void *v)
{
char rsbuf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq, "address mask n neigh neigh neigh\n");
else {
const struct rose_node *rose_node = v;
/* if (rose_node->loopback) {
seq_printf(seq, "%-10s %04d 1 loopback\n",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask);
} else { */
seq_printf(seq, "%-10s %04d %d",
rose2asc(rsbuf, &rose_node->address),
rose_node->mask,
rose_node->count);
for (i = 0; i < rose_node->count; i++)
seq_printf(seq, " %05d",
rose_node->neighbour[i]->number);
seq_puts(seq, "\n");
/* } */
}
return 0;
}
static const struct seq_operations rose_node_seqops = {
.start = rose_node_start,
.next = rose_node_next,
.stop = rose_node_stop,
.show = rose_node_show,
};
static int rose_nodes_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_node_seqops);
}
const struct file_operations rose_nodes_fops = {
.owner = THIS_MODULE,
.open = rose_nodes_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_neigh_list_lock)
{
struct rose_neigh *rose_neigh;
int i = 1;
spin_lock_bh(&rose_neigh_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
rose_neigh = rose_neigh->next, ++i);
return (i == *pos) ? rose_neigh : NULL;
}
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_neigh_list
: ((struct rose_neigh *)v)->next;
}
static void rose_neigh_stop(struct seq_file *seq, void *v)
__releases(rose_neigh_list_lock)
{
spin_unlock_bh(&rose_neigh_list_lock);
}
static int rose_neigh_show(struct seq_file *seq, void *v)
{
char buf[11];
int i;
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"addr callsign dev count use mode restart t0 tf digipeaters\n");
else {
struct rose_neigh *rose_neigh = v;
/* if (!rose_neigh->loopback) { */
seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
rose_neigh->number,
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
rose_neigh->use,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
ax25_display_timer(&rose_neigh->ftimer) / HZ);
if (rose_neigh->digipeat != NULL) {
for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
}
seq_puts(seq, "\n");
}
return 0;
}
static const struct seq_operations rose_neigh_seqops = {
.start = rose_neigh_start,
.next = rose_neigh_next,
.stop = rose_neigh_stop,
.show = rose_neigh_show,
};
static int rose_neigh_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_neigh_seqops);
}
const struct file_operations rose_neigh_fops = {
.owner = THIS_MODULE,
.open = rose_neigh_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_route_list_lock)
{
struct rose_route *rose_route;
int i = 1;
spin_lock_bh(&rose_route_list_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
for (rose_route = rose_route_list; rose_route && i < *pos;
rose_route = rose_route->next, ++i);
return (i == *pos) ? rose_route : NULL;
}
static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return (v == SEQ_START_TOKEN) ? rose_route_list
: ((struct rose_route *)v)->next;
}
static void rose_route_stop(struct seq_file *seq, void *v)
__releases(rose_route_list_lock)
{
spin_unlock_bh(&rose_route_list_lock);
}
static int rose_route_show(struct seq_file *seq, void *v)
{
char buf[11], rsbuf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"lci address callsign neigh <-> lci address callsign neigh\n");
else {
struct rose_route *rose_route = v;
if (rose_route->neigh1)
seq_printf(seq,
"%3.3X %-10s %-9s %05d ",
rose_route->lci1,
rose2asc(rsbuf, &rose_route->src_addr),
ax2asc(buf, &rose_route->src_call),
rose_route->neigh1->number);
else
seq_puts(seq,
"000 * * 00000 ");
if (rose_route->neigh2)
seq_printf(seq,
"%3.3X %-10s %-9s %05d\n",
rose_route->lci2,
rose2asc(rsbuf, &rose_route->dest_addr),
ax2asc(buf, &rose_route->dest_call),
rose_route->neigh2->number);
else
seq_puts(seq,
"000 * * 00000\n");
}
return 0;
}
static const struct seq_operations rose_route_seqops = {
.start = rose_route_start,
.next = rose_route_next,
.stop = rose_route_stop,
.show = rose_route_show,
};
static int rose_route_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rose_route_seqops);
}
const struct file_operations rose_routes_fops = {
.owner = THIS_MODULE,
.open = rose_route_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
/*
* Release all memory associated with ROSE routing structures.
*/
void __exit rose_rt_free(void)
{
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
rose_remove_node(t);
}
while (rose_route != NULL) {
u = rose_route;
rose_route = rose_route->next;
rose_remove_route(u);
}
}
|
87,233,230,065,746 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose);
/*
* This routine purges all of the queues of frames.
*/
void rose_clear_queues(struct sock *sk)
{
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void rose_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct rose_sock *rose = rose_sk(sk);
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (rose->va != nr) {
while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) {
skb = skb_dequeue(&rose->ack_queue);
kfree_skb(skb);
rose->va = (rose->va + 1) % ROSE_MODULUS;
}
}
}
void rose_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by rose_kick. This arrangement handles the possibility of an
* empty output queue.
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
* Validate that the value of nr is between va and vs. Return true or
* false for testing.
*/
int rose_validate_nr(struct sock *sk, unsigned short nr)
{
struct rose_sock *rose = rose_sk(sk);
unsigned short vc = rose->va;
while (vc != rose->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % ROSE_MODULUS;
}
return nr == rose->vs;
}
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void rose_write_internal(struct sock *sk, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
char buffer[100];
int len, faclen = 0;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
faclen = rose_create_facilities(buffer, rose);
len += faclen;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_RESET_REQUEST:
len += 2;
break;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
/*
* Space for AX.25 header and PID.
*/
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
dptr = skb_put(skb, skb_tailroom(skb));
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
switch (frametype) {
case ROSE_CALL_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0xAA;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, buffer, faclen);
dptr += faclen;
break;
case ROSE_CALL_ACCEPTED:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0x00; /* Address length */
*dptr++ = 0; /* Facilities length */
break;
case ROSE_CLEAR_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = rose->cause;
*dptr++ = rose->diagnostic;
break;
case ROSE_RESET_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
break;
case ROSE_RR:
case ROSE_RNR:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr = frametype;
*dptr++ |= (rose->vr << 5) & 0xE0;
break;
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_CONFIRMATION:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
break;
default:
printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype);
kfree_skb(skb);
return;
}
rose_transmit_link(skb, rose->neighbour);
}
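/*
 * Decode an incoming frame: return its type and extract the N(S), N(R),
 * Q, D and M bits where they are present.
 */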
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
{
unsigned char *frame;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
case ROSE_CALL_REQUEST:
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_REQUEST:
case ROSE_RESET_CONFIRMATION:
return frame[2];
default:
break;
}
if ((frame[2] & 0x1F) == ROSE_RR ||
(frame[2] & 0x1F) == ROSE_RNR) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
if ((frame[2] & 0x01) == ROSE_DATA) {
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return ROSE_DATA;
}
return ROSE_ILLEGAL;
}
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT) {
if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
} else {
if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
l = p[1];
/* Prevent overflows */
if (l < 10 || l > 20)
return -1;
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
int rose_parse_facilities(unsigned char *p,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0)
return 0;
while (facilities_len > 0) {
if (*p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
facilities_len--;
p++;
break;
}
} else
break; /* Error in facilities format */
}
return 1;
}
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
{
unsigned char *p = buffer + 1;
char *callsign;
char buf[11];
int len, nb;
/* National Facilities */
if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) {
*p++ = 0x00;
*p++ = FAC_NATIONAL;
if (rose->rand != 0) {
*p++ = FAC_NATIONAL_RAND;
*p++ = (rose->rand >> 8) & 0xFF;
*p++ = (rose->rand >> 0) & 0xFF;
}
/* Sent before older facilities */
if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) {
int maxdigi = 0;
*p++ = FAC_NATIONAL_DIGIS;
*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
for (nb = 0 ; nb < rose->source_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p[6] |= AX25_HBIT;
p += AX25_ADDR_LEN;
}
for (nb = 0 ; nb < rose->dest_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p[6] &= ~AX25_HBIT;
p += AX25_ADDR_LEN;
}
}
/* For compatibility */
if (rose->source_ndigis > 0) {
*p++ = FAC_NATIONAL_SRC_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
/* For compatibility */
if (rose->dest_ndigis > 0) {
*p++ = FAC_NATIONAL_DEST_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
}
*p++ = 0x00;
*p++ = FAC_CCITT;
*p++ = FAC_CCITT_DEST_NSAP;
callsign = ax2asc(buf, &rose->dest_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
*p++ = FAC_CCITT_SRC_NSAP;
callsign = ax2asc(buf, &rose->source_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
len = p - buffer;
buffer[0] = len - 1;
return len;
}
void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
{
struct rose_sock *rose = rose_sk(sk);
rose_stop_timer(sk);
rose_stop_idletimer(sk);
rose_clear_queues(sk);
rose->lci = 0;
rose->state = ROSE_STATE_0;
if (cause != -1)
rose->cause = cause;
if (diagnostic != -1)
rose->diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
| /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose);
/*
* This routine purges all of the queues of frames.
*/
void rose_clear_queues(struct sock *sk)
{
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&rose_sk(sk)->ack_queue);
}
/*
* This routine purges the input queue of those frames that have been
* acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
* SDL diagram.
*/
void rose_frames_acked(struct sock *sk, unsigned short nr)
{
struct sk_buff *skb;
struct rose_sock *rose = rose_sk(sk);
/*
* Remove all the ack-ed frames from the ack queue.
*/
if (rose->va != nr) {
while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) {
skb = skb_dequeue(&rose->ack_queue);
kfree_skb(skb);
rose->va = (rose->va + 1) % ROSE_MODULUS;
}
}
}
void rose_requeue_frames(struct sock *sk)
{
struct sk_buff *skb, *skb_prev = NULL;
/*
* Requeue all the un-ack-ed frames on the output queue to be picked
* up by rose_kick. This arrangement handles the possibility of an
* empty output queue.
*/
while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
/*
* Validate that the value of nr is between va and vs. Return true or
* false for testing.
*/
int rose_validate_nr(struct sock *sk, unsigned short nr)
{
struct rose_sock *rose = rose_sk(sk);
unsigned short vc = rose->va;
while (vc != rose->vs) {
if (nr == vc) return 1;
vc = (vc + 1) % ROSE_MODULUS;
}
return nr == rose->vs;
}
/*
* This routine is called when the packet layer internally generates a
* control frame.
*/
void rose_write_internal(struct sock *sk, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
char buffer[100];
int len, faclen = 0;
len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
faclen = rose_create_facilities(buffer, rose);
len += faclen;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_RESET_REQUEST:
len += 2;
break;
}
if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
return;
/*
* Space for AX.25 header and PID.
*/
skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
dptr = skb_put(skb, skb_tailroom(skb));
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
switch (frametype) {
case ROSE_CALL_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, buffer, faclen);
dptr += faclen;
break;
case ROSE_CALL_ACCEPTED:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = 0x00; /* Address length */
*dptr++ = 0; /* Facilities length */
break;
case ROSE_CLEAR_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = rose->cause;
*dptr++ = rose->diagnostic;
break;
case ROSE_RESET_REQUEST:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
*dptr++ = ROSE_DTE_ORIGINATED;
*dptr++ = 0;
break;
case ROSE_RR:
case ROSE_RNR:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr = frametype;
*dptr++ |= (rose->vr << 5) & 0xE0;
break;
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_CONFIRMATION:
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
break;
default:
printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype);
kfree_skb(skb);
return;
}
rose_transmit_link(skb, rose->neighbour);
}
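/*
 * Decode an incoming frame: return its type and extract the N(S), N(R),
 * Q, D and M bits where they are present.
 */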
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
{
unsigned char *frame;
frame = skb->data;
*ns = *nr = *q = *d = *m = 0;
switch (frame[2]) {
case ROSE_CALL_REQUEST:
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
case ROSE_CLEAR_CONFIRMATION:
case ROSE_RESET_REQUEST:
case ROSE_RESET_CONFIRMATION:
return frame[2];
default:
break;
}
if ((frame[2] & 0x1F) == ROSE_RR ||
(frame[2] & 0x1F) == ROSE_RNR) {
*nr = (frame[2] >> 5) & 0x07;
return frame[2] & 0x1F;
}
if ((frame[2] & 0x01) == ROSE_DATA) {
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
*nr = (frame[2] >> 5) & 0x07;
*ns = (frame[2] >> 1) & 0x07;
return ROSE_DATA;
}
return ROSE_ILLEGAL;
}
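/*
 * Parse the national facilities field, picking out the random number
 * and any digipeater and failed-call information. Returns the number
 * of bytes consumed, or -1 if the field is malformed.
 */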
static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char *pt;
unsigned char l, lg, n = 0;
int fac_national_digis_received = 0;
do {
switch (*p & 0xC0) {
case 0x00:
if (len < 2)
return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (len < 3)
return -1;
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
if (len < 4)
return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
if (len < 2)
return -1;
l = p[1];
if (len < 2 + l)
return -1;
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
if (l < AX25_ADDR_LEN)
return -1;
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
if (l < 1 + ROSE_ADDR_LEN)
return -1;
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
if (l % AX25_ADDR_LEN)
return -1;
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
if (pt[6] & AX25_HBIT) {
if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
} else {
if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
return -1;
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
}
}
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
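/*
 * Parse the CCITT facilities field, extracting the calling and called
 * NSAP addresses and callsigns. Returns the number of bytes consumed,
 * or -1 if the field is malformed.
 */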
static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len)
{
unsigned char l, n = 0;
char callsign[11];
do {
switch (*p & 0xC0) {
case 0x00:
if (len < 2)
return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
if (len < 3)
return -1;
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
if (len < 4)
return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
if (len < 2)
return -1;
l = p[1];
/* Prevent overflows */
if (l < 10 || l > 20)
return -1;
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->source_call, callsign);
}
if (*p == FAC_CCITT_SRC_NSAP) {
memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
callsign[l - 10] = '\0';
asc2ax(&facilities->dest_call, callsign);
}
p += l + 2;
n += l + 2;
len -= l + 2;
break;
}
} while (*p != 0x00 && len > 0);
return n;
}
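/*
 * Parse the facilities block of a call request. Returns non-zero only
 * if the whole block was parsed cleanly.
 */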
int rose_parse_facilities(unsigned char *p, unsigned packet_len,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
return 0;
while (facilities_len >= 3 && *p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
len = 1;
break;
}
if (len < 0)
return 0;
if (WARN_ON(len >= facilities_len))
return 0;
facilities_len -= len + 1;
p += len + 1;
}
return facilities_len == 0;
}
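/*
 * Build the facilities block for an outgoing call request from the
 * socket's addressing information. Returns its total length, including
 * the leading length octet.
 */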
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
{
unsigned char *p = buffer + 1;
char *callsign;
char buf[11];
int len, nb;
/* National Facilities */
if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) {
*p++ = 0x00;
*p++ = FAC_NATIONAL;
if (rose->rand != 0) {
*p++ = FAC_NATIONAL_RAND;
*p++ = (rose->rand >> 8) & 0xFF;
*p++ = (rose->rand >> 0) & 0xFF;
}
/* Sent before older facilities */
if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) {
int maxdigi = 0;
*p++ = FAC_NATIONAL_DIGIS;
*p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis);
for (nb = 0 ; nb < rose->source_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN);
p[6] |= AX25_HBIT;
p += AX25_ADDR_LEN;
}
for (nb = 0 ; nb < rose->dest_ndigis ; nb++) {
if (++maxdigi >= ROSE_MAX_DIGIS)
break;
memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN);
p[6] &= ~AX25_HBIT;
p += AX25_ADDR_LEN;
}
}
/* For compatibility */
if (rose->source_ndigis > 0) {
*p++ = FAC_NATIONAL_SRC_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
/* For compatibility */
if (rose->dest_ndigis > 0) {
*p++ = FAC_NATIONAL_DEST_DIGI;
*p++ = AX25_ADDR_LEN;
memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN);
p += AX25_ADDR_LEN;
}
}
*p++ = 0x00;
*p++ = FAC_CCITT;
*p++ = FAC_CCITT_DEST_NSAP;
callsign = ax2asc(buf, &rose->dest_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
*p++ = FAC_CCITT_SRC_NSAP;
callsign = ax2asc(buf, &rose->source_call);
*p++ = strlen(callsign) + 10;
*p++ = (strlen(callsign) + 9) * 2; /* ??? */
*p++ = 0x47; *p++ = 0x00; *p++ = 0x11;
*p++ = ROSE_ADDR_LEN * 2;
memcpy(p, &rose->source_addr, ROSE_ADDR_LEN);
p += ROSE_ADDR_LEN;
memcpy(p, callsign, strlen(callsign));
p += strlen(callsign);
len = p - buffer;
buffer[0] = len - 1;
return len;
}
void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
{
struct rose_sock *rose = rose_sk(sk);
rose_stop_timer(sk);
rose_stop_idletimer(sk);
rose_clear_queues(sk);
rose->lci = 0;
rose->state = ROSE_STATE_0;
if (cause != -1)
rose->cause = cause;
if (diagnostic != -1)
rose->diagnostic = diagnostic;
sk->sk_state = TCP_CLOSE;
sk->sk_err = reason;
sk->sk_shutdown |= SEND_SHUTDOWN;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
}
}
|
63,491,585,838,103 | /*
* Copyright (C) 2004 IBM Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Dave Safford <safford@watson.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
*
*/
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "tpm.h"
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 256,
};
enum tpm_duration {
TPM_SHORT = 0,
TPM_MEDIUM = 1,
TPM_LONG = 2,
TPM_UNDEFINED,
};
#define TPM_MAX_ORDINAL 243
#define TPM_MAX_PROTECTED_ORDINAL 12
#define TPM_PROTECTED_ORDINAL_MASK 0xFF
/*
* Bug workaround - some TPMs don't flush the most
* recently changed pcr on suspend, so force the flush
* with an extend to the selected _unused_ non-volatile pcr.
*/
static int tpm_suspend_pcr;
module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
"PCR to use for dummy writes to faciltate flush on suspend.");
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
/*
* Array with one entry per ordinal defining the maximum amount
* of time the chip could take to return the result. The ordinal
* designation of short, medium or long is defined in a table in
* TCG Specification TPM Main Part 2 TPM Structures Section 17. The
* values of the SHORT, MEDIUM, and LONG durations are retrieved
* from the chip during initialization with a call to tpm_get_timeouts.
*/
static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
};
static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_LONG,
TPM_MEDIUM, /* 15 */
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT, /* 20 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT, /* 25 */
TPM_SHORT,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 30 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 35 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 40 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 45 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_LONG,
TPM_MEDIUM, /* 50 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 55 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 60 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 65 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 70 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 75 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 80 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT,
TPM_UNDEFINED, /* 85 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 90 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 95 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 100 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 105 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 110 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 115 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 120 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 125 */
TPM_SHORT,
TPM_LONG,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 130 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_MEDIUM,
TPM_UNDEFINED, /* 135 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 140 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 145 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 150 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 155 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 160 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 165 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 170 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 175 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 180 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM, /* 185 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 190 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 195 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 200 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 205 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 210 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED, /* 215 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 220 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 225 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 230 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 235 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 240 */
TPM_UNDEFINED,
TPM_MEDIUM,
};
static void user_reader_timeout(unsigned long ptr)
{
struct tpm_chip *chip = (struct tpm_chip *) ptr;
schedule_work(&chip->work);
}
static void timeout_work(struct work_struct *work)
{
struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
mutex_lock(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
memset(chip->data_buffer, 0, TPM_BUFSIZE);
mutex_unlock(&chip->buffer_mutex);
}
/*
* Returns max number of jiffies to wait
*/
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
u32 ordinal)
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
if (ordinal < TPM_MAX_ORDINAL)
duration_idx = tpm_ordinal_duration[ordinal];
else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
TPM_MAX_PROTECTED_ORDINAL)
duration_idx =
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
if (duration <= 0)
return 2 * 60 * HZ;
else
return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
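/*
 * A worked example of the lookup above (illustrative only): an ordinal of
 * 10 indexes tpm_ordinal_duration[10] == TPM_SHORT, so the cap becomes
 * chip->vendor.duration[TPM_SHORT] as filled in by tpm_get_timeouts();
 * if that slot is still zero, the conservative 2 * 60 * HZ default is
 * returned instead.
 */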
/*
* Internal kernel interface to transmit TPM commands
*/
static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
size_t bufsiz)
{
ssize_t rc;
u32 count, ordinal;
unsigned long stop;
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
"invalid count value %x %zx \n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
if (chip->vendor.irq)
goto out_recv;
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
u8 status = chip->vendor.status(chip);
if ((status & chip->vendor.req_complete_mask) ==
chip->vendor.req_complete_val)
goto out_recv;
if ((status == chip->vendor.req_canceled)) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
msleep(TPM_TIMEOUT); /* CHECK */
rmb();
} while (time_before(jiffies, stop));
chip->vendor.cancel(chip);
dev_err(chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
mutex_unlock(&chip->tpm_mutex);
return rc;
}
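/*
 * A minimal sketch of the buffer tpm_transmit() expects (illustrative, not
 * part of the original source): a raw TPM 1.2 request whose 10-byte header
 * carries the big-endian length at offset 2 and the ordinal at offset 6,
 * matching the offsets peeked at above and the literal command used by
 * tpm_continue_selftest() below.
 */
#if 0	/* illustrative only */
static const u8 example_request[] = {
	0x00, 0xc1,		/* bytes 0-1: tag, TPM_TAG_RQU_COMMAND (193) */
	0x00, 0x00, 0x00, 0x0a,	/* bytes 2-5: total length, read as "count"  */
	0x00, 0x00, 0x00, 0x53,	/* bytes 6-9: ordinal, used for the duration */
};
#endif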
#define TPM_DIGEST_SIZE 20
#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
enum tpm_capabilities {
TPM_CAP_FLAG = cpu_to_be32(4),
TPM_CAP_PROP = cpu_to_be32(5),
CAP_VERSION_1_1 = cpu_to_be32(0x06),
CAP_VERSION_1_2 = cpu_to_be32(0x1A)
};
enum tpm_sub_capabilities {
TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
};
static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
int len, const char *desc)
{
int err;
len = tpm_transmit(chip,(u8 *) cmd, len);
if (len < 0)
return len;
if (len == TPM_ERROR_SIZE) {
err = be32_to_cpu(cmd->header.out.return_code);
dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
}
return 0;
}
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
static const struct tpm_input_header tpm_getcap_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(22),
.ordinal = TPM_ORD_GET_CAP
};
ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_getcap_header;
if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
tpm_cmd.params.getcap_in.cap = subcap_id;
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
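/* Shrinking the wire length by one __be32 this way is safe here:
   22 - 4 = 18, so the subtraction never borrows across bytes of the
   big-endian length field. */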
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
else
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
}
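/*
 * Note on the function below (an observation, not upstream documentation):
 * tpm_gen_interrupt() simply issues a harmless TIS-timeout GetCapability;
 * a hardware driver can apparently use the completion interrupt this
 * provokes to verify that interrupt delivery works.
 */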
void tpm_gen_interrupt(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
ssize_t rc;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
if (rc)
goto duration;
if (be32_to_cpu(tpm_cmd.header.out.length)
!= 4 * sizeof(u32))
goto duration;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout = be32_to_cpu(timeout_cap->a);
if (timeout)
chip->vendor.timeout_a = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
chip->vendor.timeout_b = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
chip->vendor.timeout_c = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
chip->vendor.timeout_d = usecs_to_jiffies(timeout);
duration:
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
return;
if (be32_to_cpu(tpm_cmd.header.out.return_code)
!= 3 * sizeof(u32))
return;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
*/
if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
chip->vendor.duration[TPM_SHORT] = HZ;
chip->vendor.duration[TPM_MEDIUM] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
chip->vendor.duration[TPM_LONG] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
void tpm_continue_selftest(struct tpm_chip *chip)
{
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.owned);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
/*
* tpm_chip_find_get - return tpm_chip for given chip number
*/
static struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->dev->driver->owner)) {
chip = pos;
break;
}
}
rcu_read_unlock();
return chip;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
static struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORDINAL_PCRREAD
};
int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
"attempting to read a pcr value");
if (rc == 0)
memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
TPM_DIGEST_SIZE);
return rc;
}
/**
* tpm_pcr_read - read a pcr value
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
* @res_buf: TPM_PCR value
* size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built-in, but in case it is not,
* protect against the chip disappearing by incrementing
* the module usage count.
*/
int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
* @hash: hash value used to extend pcr value
*
* The TPM driver should be built-in, but in case it is not,
* protect against the chip disappearing by incrementing
* the module usage count.
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_RESULT_SIZE 34
static struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
.ordinal = TPM_ORD_PCR_EXTEND
};
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
int rc;
struct tpm_chip *chip;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"attempting extend a PCR value");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
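/*
 * A minimal usage sketch for the two exported PCR helpers (illustrative,
 * not part of the original source). TPM_ANY_NUM is assumed to come from
 * tpm.h; the PCR index 10 is an arbitrary example.
 */
#if 0	/* illustrative only */
static int example_extend_and_read(const u8 *measurement)
{
	u8 digest[TPM_DIGEST_SIZE];	/* 20-byte SHA-1 sized buffer */
	int rc;

	rc = tpm_pcr_extend(TPM_ANY_NUM, 10, measurement);
	if (rc)
		return rc;		/* e.g. -ENODEV if no chip is registered */
	return tpm_pcr_read(TPM_ANY_NUM, 10, digest);
}
#endif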
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
u8 digest[TPM_DIGEST_SIZE];
ssize_t rc;
int i, j, num_pcrs;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
if (rc)
return 0;
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
rc = __tpm_pcr_read(chip, i, digest);
if (rc)
break;
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
#define READ_PUBEK_RESULT_SIZE 314
#define TPM_ORD_READPUBEK cpu_to_be32(124)
struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(30),
.ordinal = TPM_ORD_READPUBEK
};
ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
struct tpm_cmd_t tpm_cmd;
ssize_t err;
int i, rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_readpubek_header;
err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
"attempting to read the PUBEK");
if (err)
goto out;
/*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
*/
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
"Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
" %02X %02X %02X %02X %02X %02X %02X %02X\n"
"Modulus length: %d\nModulus: \n",
data[10], data[11], data[12], data[13], data[14],
data[15], data[16], data[17], data[22], data[23],
data[24], data[25], data[26], data[27], data[28],
data[29], data[30], data[31], data[32], data[33],
be32_to_cpu(*((__be32 *) (data + 34))));
for (i = 0; i < 256; i++) {
str += sprintf(str, "%02X ", data[i + 38]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
out:
rc = str - buf;
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_pubek);
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version.Major, cap.tpm_version.Minor,
cap.tpm_version.revMajor, cap.tpm_version.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
cap.tpm_version_1_2.revMajor,
cap.tpm_version_1_2.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return 0;
chip->vendor.cancel(chip);
return count;
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
/*
* Device file system interface to the TPM
*
* It's assured that the chip will be opened just once,
* by the check of the is_open variable, which is protected
* by driver_lock.
*/
int tpm_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct tpm_chip *chip = NULL, *pos;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (pos->vendor.miscdev.minor == minor) {
chip = pos;
get_device(chip->dev);
break;
}
}
rcu_read_unlock();
if (!chip)
return -ENODEV;
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
/*
* Called on file close
*/
int tpm_release(struct inode *inode, struct file *file)
{
struct tpm_chip *chip = file->private_data;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
file->private_data = NULL;
atomic_set(&chip->data_pending, 0);
kfree(chip->data_buffer);
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_release);
ssize_t tpm_write(struct file *file, const char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
size_t in_size = size, out_size;
/* cannot perform a write until the read has cleared
either via tpm_read or a user_read_timer timeout */
while (atomic_read(&chip->data_pending) != 0)
msleep(TPM_TIMEOUT);
mutex_lock(&chip->buffer_mutex);
if (in_size > TPM_BUFSIZE)
in_size = TPM_BUFSIZE;
if (copy_from_user
(chip->data_buffer, (void __user *) buf, in_size)) {
mutex_unlock(&chip->buffer_mutex);
return -EFAULT;
}
/* atomic tpm command send and result receive */
out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
atomic_set(&chip->data_pending, out_size);
mutex_unlock(&chip->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
return in_size;
}
EXPORT_SYMBOL_GPL(tpm_write);
ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
if (size < ret_size)
ret_size = size;
mutex_lock(&chip->buffer_mutex);
if (copy_to_user(buf, chip->data_buffer, ret_size))
ret_size = -EFAULT;
mutex_unlock(&chip->buffer_mutex);
}
return ret_size;
}
EXPORT_SYMBOL_GPL(tpm_read);
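/*
 * A minimal user-space sketch of the character-device protocol implemented
 * by tpm_write()/tpm_read() above (illustrative, not part of the original
 * source): write one complete command, then read back the pending result.
 * Assumes "/dev/tpm0" and, in a real program, <fcntl.h> and <unistd.h>.
 */
#if 0	/* illustrative only */
int example_tpm_transaction(const unsigned char *cmd, size_t cmd_len,
			    unsigned char *resp, size_t resp_len)
{
	int fd = open("/dev/tpm0", O_RDWR);
	ssize_t n;

	if (fd < 0)
		return -1;
	if (write(fd, cmd, cmd_len) != (ssize_t)cmd_len) {
		close(fd);
		return -1;	/* the driver queues the command on write() */
	}
	n = read(fd, resp, resp_len);	/* collects the pending response */
	close(fd);
	return n < 0 ? -1 : (int)n;
}
#endif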
void tpm_remove_hardware(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL) {
dev_err(dev, "No device data found\n");
return;
}
spin_lock(&driver_lock);
list_del_rcu(&chip->list);
spin_unlock(&driver_lock);
synchronize_rcu();
misc_deregister(&chip->vendor.miscdev);
sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
tpm_bios_log_teardown(chip->bios_dir);
/* write it this way to be explicit (chip->dev == dev) */
put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10
static struct tpm_input_header savestate_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(10),
.ordinal = TPM_ORD_SAVESTATE
};
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
*/
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
int rc;
u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
/* for buggy tpm, flush pcrs with extend to selected dummy */
if (tpm_suspend_pcr) {
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
cmd.header.in = savestate_header;
rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
"sending savestate before suspend");
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
/*
* Resume from a power save. The BIOS already restored
* the TPM state.
*/
int tpm_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
/* If the vendor provided a release function, call it too. */
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
if (chip->vendor.release)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
/*
* Once all references to platform device are down to 0,
* release all allocated structures.
*/
void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_dev_vendor_release(chip);
chip->release(dev);
kfree(chip);
}
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
* Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim.  Prior to calling this
* function the specific probe function has called pci_enable_device;
* upon errant exit from this function the specific probe function
* should call pci_disable_device.
*/
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
#define DEVNAME_SIZE 7
char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
if (chip == NULL || devname == NULL)
goto out_free;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
INIT_WORK(&chip->work, timeout_work);
setup_timer(&chip->user_read_timer, user_reader_timeout,
(unsigned long)chip);
memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
if (chip->dev_num >= TPM_NUM_DEVICES) {
dev_err(dev, "No available tpm device numbers\n");
goto out_free;
} else if (chip->dev_num == 0)
chip->vendor.miscdev.minor = TPM_MINOR;
else
chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
set_bit(chip->dev_num, dev_mask);
scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
chip->vendor.miscdev.name = devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
chip->release = dev->release;
dev->release = tpm_dev_release;
dev_set_drvdata(dev, chip);
if (misc_register(&chip->vendor.miscdev)) {
dev_err(chip->dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
put_device(chip->dev);
return NULL;
}
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
misc_deregister(&chip->vendor.miscdev);
put_device(chip->dev);
return NULL;
}
chip->bios_dir = tpm_bios_log_setup(devname);
/* Make chip available */
spin_lock(&driver_lock);
list_add_rcu(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
return chip;
out_free:
kfree(chip);
kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| /*
* Copyright (C) 2004 IBM Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Dave Safford <safford@watson.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
* Note, the TPM chip is not interrupt driven (only polling)
* and can have very long timeouts (minutes!). Hence the unusual
* calls to msleep.
*
*/
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "tpm.h"
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
TPM_NUM_DEVICES = 256,
};
enum tpm_duration {
TPM_SHORT = 0,
TPM_MEDIUM = 1,
TPM_LONG = 2,
TPM_UNDEFINED,
};
#define TPM_MAX_ORDINAL 243
#define TPM_MAX_PROTECTED_ORDINAL 12
#define TPM_PROTECTED_ORDINAL_MASK 0xFF
/*
* Bug workaround - some TPMs don't flush the most
* recently changed pcr on suspend, so force the flush
* with an extend to the selected _unused_ non-volatile pcr.
*/
static int tpm_suspend_pcr;
module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644);
MODULE_PARM_DESC(suspend_pcr,
"PCR to use for dummy writes to faciltate flush on suspend.");
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
/*
* Array with one entry per ordinal defining the maximum amount
* of time the chip could take to return the result. The ordinal
* designation of short, medium or long is defined in a table in
* TCG Specification TPM Main Part 2 TPM Structures Section 17. The
* values of the SHORT, MEDIUM, and LONG durations are retrieved
* from the chip during initialization with a call to tpm_get_timeouts.
*/
static const u8 tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
};
static const u8 tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
TPM_UNDEFINED, /* 0 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 5 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 10 */
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_LONG,
TPM_MEDIUM, /* 15 */
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT, /* 20 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT, /* 25 */
TPM_SHORT,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 30 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 35 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 40 */
TPM_LONG,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 45 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_LONG,
TPM_MEDIUM, /* 50 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 55 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 60 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 65 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 70 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 75 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 80 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_LONG,
TPM_SHORT,
TPM_UNDEFINED, /* 85 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 90 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 95 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 100 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 105 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 110 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 115 */
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 120 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 125 */
TPM_SHORT,
TPM_LONG,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT, /* 130 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_SHORT,
TPM_MEDIUM,
TPM_UNDEFINED, /* 135 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 140 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 145 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 150 */
TPM_MEDIUM,
TPM_MEDIUM,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 155 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 160 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 165 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_LONG, /* 170 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 175 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_MEDIUM, /* 180 */
TPM_SHORT,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM, /* 185 */
TPM_SHORT,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 190 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 195 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 200 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 205 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_MEDIUM, /* 210 */
TPM_UNDEFINED,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_MEDIUM,
TPM_UNDEFINED, /* 215 */
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT,
TPM_SHORT, /* 220 */
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_SHORT,
TPM_UNDEFINED, /* 225 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 230 */
TPM_LONG,
TPM_MEDIUM,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED, /* 235 */
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_UNDEFINED,
TPM_SHORT, /* 240 */
TPM_UNDEFINED,
TPM_MEDIUM,
};
static void user_reader_timeout(unsigned long ptr)
{
struct tpm_chip *chip = (struct tpm_chip *) ptr;
schedule_work(&chip->work);
}
static void timeout_work(struct work_struct *work)
{
struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
mutex_lock(&chip->buffer_mutex);
atomic_set(&chip->data_pending, 0);
memset(chip->data_buffer, 0, TPM_BUFSIZE);
mutex_unlock(&chip->buffer_mutex);
}
/*
* Returns max number of jiffies to wait
*/
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
u32 ordinal)
{
int duration_idx = TPM_UNDEFINED;
int duration = 0;
if (ordinal < TPM_MAX_ORDINAL)
duration_idx = tpm_ordinal_duration[ordinal];
else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
TPM_MAX_PROTECTED_ORDINAL)
duration_idx =
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
if (duration <= 0)
return 2 * 60 * HZ;
else
return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/*
* Internal kernel interface to transmit TPM commands
*/
static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
size_t bufsiz)
{
ssize_t rc;
u32 count, ordinal;
unsigned long stop;
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
dev_err(chip->dev,
"invalid count value %x %zx \n", count, bufsiz);
return -E2BIG;
}
mutex_lock(&chip->tpm_mutex);
if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) {
dev_err(chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
if (chip->vendor.irq)
goto out_recv;
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
u8 status = chip->vendor.status(chip);
if ((status & chip->vendor.req_complete_mask) ==
chip->vendor.req_complete_val)
goto out_recv;
if ((status == chip->vendor.req_canceled)) {
dev_err(chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
msleep(TPM_TIMEOUT); /* CHECK */
rmb();
} while (time_before(jiffies, stop));
chip->vendor.cancel(chip);
dev_err(chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->vendor.recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
dev_err(chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
mutex_unlock(&chip->tpm_mutex);
return rc;
}
#define TPM_DIGEST_SIZE 20
#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
enum tpm_capabilities {
TPM_CAP_FLAG = cpu_to_be32(4),
TPM_CAP_PROP = cpu_to_be32(5),
CAP_VERSION_1_1 = cpu_to_be32(0x06),
CAP_VERSION_1_2 = cpu_to_be32(0x1A)
};
enum tpm_sub_capabilities {
TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
};
static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
int len, const char *desc)
{
int err;
len = tpm_transmit(chip,(u8 *) cmd, len);
if (len < 0)
return len;
if (len == TPM_ERROR_SIZE) {
err = be32_to_cpu(cmd->header.out.return_code);
dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
return err;
}
return 0;
}
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
static const struct tpm_input_header tpm_getcap_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(22),
.ordinal = TPM_ORD_GET_CAP
};
ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_getcap_header;
if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
tpm_cmd.params.getcap_in.cap = subcap_id;
/*subcap field not necessary */
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
} else {
if (subcap_id == TPM_CAP_FLAG_PERM ||
subcap_id == TPM_CAP_FLAG_VOL)
tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
else
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
}
void tpm_gen_interrupt(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
ssize_t rc;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the timeouts");
if (rc)
goto duration;
if (be32_to_cpu(tpm_cmd.header.out.length)
!= 4 * sizeof(u32))
goto duration;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout = be32_to_cpu(timeout_cap->a);
if (timeout)
chip->vendor.timeout_a = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
chip->vendor.timeout_b = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
chip->vendor.timeout_c = usecs_to_jiffies(timeout);
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
chip->vendor.timeout_d = usecs_to_jiffies(timeout);
duration:
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
return;
if (be32_to_cpu(tpm_cmd.header.out.return_code)
!= 3 * sizeof(u32))
return;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
*/
if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
chip->vendor.duration[TPM_SHORT] = HZ;
chip->vendor.duration[TPM_MEDIUM] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
chip->vendor.duration[TPM_LONG] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
void tpm_continue_selftest(struct tpm_chip *chip)
{
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
}
EXPORT_SYMBOL_GPL(tpm_continue_selftest);
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent enabled state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_enabled);
ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
"attempting to determine the permanent active state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_active);
ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr,
char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
"attempting to determine the owner state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.owned);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_owned);
ssize_t tpm_show_temp_deactivated(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
"attempting to determine the temporary state");
if (rc)
return 0;
rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_temp_deactivated);
/*
* tpm_chip_find_get - return tpm_chip for given chip number
*/
static struct tpm_chip *tpm_chip_find_get(int chip_num)
{
struct tpm_chip *pos, *chip = NULL;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
continue;
if (try_module_get(pos->dev->driver->owner)) {
chip = pos;
break;
}
}
rcu_read_unlock();
return chip;
}
#define TPM_ORDINAL_PCRREAD cpu_to_be32(21)
#define READ_PCR_RESULT_SIZE 30
static struct tpm_input_header pcrread_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(14),
.ordinal = TPM_ORDINAL_PCRREAD
};
int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
rc = transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
"attempting to read a pcr value");
if (rc == 0)
memcpy(res_buf, cmd.params.pcrread_out.pcr_result,
TPM_DIGEST_SIZE);
return rc;
}
/**
* tpm_pcr_read - read a pcr value
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to retrieve
* @res_buf: TPM_PCR value
* size of res_buf is 20 bytes (or NULL if you don't care)
*
* The TPM driver should be built-in, but in case it is not,
* protect against the chip disappearing by incrementing
* the module usage count.
*/
int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
/**
* tpm_pcr_extend - extend pcr value with hash
* @chip_num: tpm idx # or ANY
* @pcr_idx: pcr idx to extend
* @hash: hash value used to extend pcr value
*
* The TPM driver should be built-in, but in case it is not,
* protect against the chip disappearing by incrementing
* the module usage count.
*/
#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
#define EXTEND_PCR_RESULT_SIZE 34
static struct tpm_input_header pcrextend_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(34),
.ordinal = TPM_ORD_PCR_EXTEND
};
int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
{
struct tpm_cmd_t cmd;
int rc;
struct tpm_chip *chip;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"attempting extend a PCR value");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
int rc;
chip = tpm_chip_find_get(chip_num);
if (chip == NULL)
return -ENODEV;
rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
tpm_chip_put(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
u8 digest[TPM_DIGEST_SIZE];
ssize_t rc;
int i, j, num_pcrs;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
if (rc)
return 0;
num_pcrs = be32_to_cpu(cap.num_pcrs);
for (i = 0; i < num_pcrs; i++) {
rc = __tpm_pcr_read(chip, i, digest);
if (rc)
break;
str += sprintf(str, "PCR-%02d: ", i);
for (j = 0; j < TPM_DIGEST_SIZE; j++)
str += sprintf(str, "%02X ", digest[j]);
str += sprintf(str, "\n");
}
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_pcrs);
#define READ_PUBEK_RESULT_SIZE 314
#define TPM_ORD_READPUBEK cpu_to_be32(124)
struct tpm_input_header tpm_readpubek_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(30),
.ordinal = TPM_ORD_READPUBEK
};
ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 *data;
struct tpm_cmd_t tpm_cmd;
ssize_t err;
int i, rc;
char *str = buf;
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_readpubek_header;
err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
"attempting to read the PUBEK");
if (err)
goto out;
/*
ignore header 10 bytes
algorithm 32 bits (1 == RSA )
encscheme 16 bits
sigscheme 16 bits
parameters (RSA 12->bytes: keybit, #primes, expbit)
keylenbytes 32 bits
256 byte modulus
ignore checksum 20 bytes
*/
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
"Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
" %02X %02X %02X %02X %02X %02X %02X %02X\n"
"Modulus length: %d\nModulus: \n",
data[10], data[11], data[12], data[13], data[14],
data[15], data[16], data[17], data[22], data[23],
data[24], data[25], data[26], data[27], data[28],
data[29], data[30], data[31], data[32], data[33],
be32_to_cpu(*((__be32 *) (data + 34))));
for (i = 0; i < 256; i++) {
str += sprintf(str, "%02X ", data[i + 38]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
out:
rc = str - buf;
return rc;
}
EXPORT_SYMBOL_GPL(tpm_show_pubek);
ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version.Major, cap.tpm_version.Minor,
cap.tpm_version.revMajor, cap.tpm_version.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps);
ssize_t tpm_show_caps_1_2(struct device * dev,
struct device_attribute * attr, char *buf)
{
cap_t cap;
ssize_t rc;
char *str = buf;
rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
str += sprintf(str, "Manufacturer: 0x%x\n",
be32_to_cpu(cap.manufacturer_id));
rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
"attempting to determine the 1.2 version");
if (rc)
return 0;
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor,
cap.tpm_version_1_2.revMajor,
cap.tpm_version_1_2.revMinor);
return str - buf;
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return 0;
chip->vendor.cancel(chip);
return count;
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
/*
* Device file system interface to the TPM
*
* It's assured that the chip will be opened just once,
* by the check of the is_open variable, which is protected
* by driver_lock.
*/
int tpm_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct tpm_chip *chip = NULL, *pos;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (pos->vendor.miscdev.minor == minor) {
chip = pos;
get_device(chip->dev);
break;
}
}
rcu_read_unlock();
if (!chip)
return -ENODEV;
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_open);
/*
* Called on file close
*/
int tpm_release(struct inode *inode, struct file *file)
{
struct tpm_chip *chip = file->private_data;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
file->private_data = NULL;
atomic_set(&chip->data_pending, 0);
kfree(chip->data_buffer);
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(tpm_release);
ssize_t tpm_write(struct file *file, const char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
size_t in_size = size, out_size;
/* cannot perform a write until the read has cleared
either via tpm_read or a user_read_timer timeout */
while (atomic_read(&chip->data_pending) != 0)
msleep(TPM_TIMEOUT);
mutex_lock(&chip->buffer_mutex);
if (in_size > TPM_BUFSIZE)
in_size = TPM_BUFSIZE;
if (copy_from_user
(chip->data_buffer, (void __user *) buf, in_size)) {
mutex_unlock(&chip->buffer_mutex);
return -EFAULT;
}
/* atomic tpm command send and result receive */
out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
atomic_set(&chip->data_pending, out_size);
mutex_unlock(&chip->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
return in_size;
}
EXPORT_SYMBOL_GPL(tpm_write);
ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
if (size < ret_size)
ret_size = size;
mutex_lock(&chip->buffer_mutex);
if (copy_to_user(buf, chip->data_buffer, ret_size))
ret_size = -EFAULT;
mutex_unlock(&chip->buffer_mutex);
}
return ret_size;
}
EXPORT_SYMBOL_GPL(tpm_read);
void tpm_remove_hardware(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL) {
dev_err(dev, "No device data found\n");
return;
}
spin_lock(&driver_lock);
list_del_rcu(&chip->list);
spin_unlock(&driver_lock);
synchronize_rcu();
misc_deregister(&chip->vendor.miscdev);
sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
tpm_bios_log_teardown(chip->bios_dir);
/* write it this way to be explicit (chip->dev == dev) */
put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
#define TPM_ORD_SAVESTATE cpu_to_be32(152)
#define SAVESTATE_RESULT_SIZE 10
static struct tpm_input_header savestate_header = {
.tag = TPM_TAG_RQU_COMMAND,
.length = cpu_to_be32(10),
.ordinal = TPM_ORD_SAVESTATE
};
/*
* We are about to suspend. Save the TPM state
* so that it can be restored.
*/
int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
int rc;
u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
if (chip == NULL)
return -ENODEV;
/* for buggy tpm, flush pcrs with extend to selected dummy */
if (tpm_suspend_pcr) {
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
cmd.header.in = savestate_header;
rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
"sending savestate before suspend");
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pm_suspend);
/*
* Resume from a power save. The BIOS already restored
* the TPM state.
*/
int tpm_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip == NULL)
return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
/* If the vendor provided a release function, call it too. */
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
if (chip->vendor.release)
chip->vendor.release(chip->dev);
clear_bit(chip->dev_num, dev_mask);
kfree(chip->vendor.miscdev.name);
}
EXPORT_SYMBOL_GPL(tpm_dev_vendor_release);
/*
* Once all references to platform device are down to 0,
* release all allocated structures.
*/
void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_dev_vendor_release(chip);
chip->release(dev);
kfree(chip);
}
EXPORT_SYMBOL_GPL(tpm_dev_release);
/*
* Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim.  Prior to calling this
* function the specific probe function has called pci_enable_device;
* upon errant exit from this function the specific probe function
* should call pci_disable_device.
*/
struct tpm_chip *tpm_register_hardware(struct device *dev,
const struct tpm_vendor_specific *entry)
{
#define DEVNAME_SIZE 7
char *devname;
struct tpm_chip *chip;
/* Driver specific per-device data */
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
if (chip == NULL || devname == NULL)
goto out_free;
mutex_init(&chip->buffer_mutex);
mutex_init(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
INIT_WORK(&chip->work, timeout_work);
setup_timer(&chip->user_read_timer, user_reader_timeout,
(unsigned long)chip);
memcpy(&chip->vendor, entry, sizeof(struct tpm_vendor_specific));
chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
if (chip->dev_num >= TPM_NUM_DEVICES) {
dev_err(dev, "No available tpm device numbers\n");
goto out_free;
} else if (chip->dev_num == 0)
chip->vendor.miscdev.minor = TPM_MINOR;
else
chip->vendor.miscdev.minor = MISC_DYNAMIC_MINOR;
set_bit(chip->dev_num, dev_mask);
scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
chip->vendor.miscdev.name = devname;
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
chip->release = dev->release;
dev->release = tpm_dev_release;
dev_set_drvdata(dev, chip);
if (misc_register(&chip->vendor.miscdev)) {
dev_err(chip->dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
put_device(chip->dev);
return NULL;
}
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
misc_deregister(&chip->vendor.miscdev);
put_device(chip->dev);
return NULL;
}
chip->bios_dir = tpm_bios_log_setup(devname);
/* Make chip available */
spin_lock(&driver_lock);
list_add_rcu(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
return chip;
out_free:
kfree(chip);
kfree(devname);
return NULL;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
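/*
 * Illustrative sketch only (not part of the original driver): how a
 * low-level TPM driver's probe path might hand its device over to
 * tpm_register_hardware(). "example_tpm_probe" is a hypothetical name;
 * a real driver supplies its own vendor table with send/recv/status
 * callbacks, attr_group and miscdev fops, and unwinds (e.g. with
 * pci_disable_device()) if NULL is returned.
 */
#if 0	/* example sketch, kept out of the build */
static int example_tpm_probe(struct device *dev,
const struct tpm_vendor_specific *vendor)
{
struct tpm_chip *chip;
/* vendor carries the driver's transmit callbacks and sysfs attributes */
chip = tpm_register_hardware(dev, vendor);
if (!chip)
return -ENODEV;	/* registration failed, caller cleans up */
return 0;
}
#endif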
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
|
82,820,302,477,370 | /*
* Handle firewalling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define skb_origaddr(skb) (((struct bridge_skb_cb *) \
(skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
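/*
 * The three macros above stash the IPv4 destination address seen at
 * PRE_ROUTING in the per-skb nf_bridge control block; comparing that saved
 * address with the current ip_hdr(skb)->daddr is how
 * br_nf_pre_routing_finish() detects that a DNAT rule rewrote the
 * destination.
 */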
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
return skb->protocol;
else if (skb->protocol == htons(ETH_P_8021Q))
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
else
return 0;
}
#define IS_VLAN_IP(skb) \
(vlan_proto(skb) == htons(ETH_P_IP) && \
brnf_filter_vlan_tagged)
#define IS_VLAN_IPV6(skb) \
(vlan_proto(skb) == htons(ETH_P_IPV6) && \
brnf_filter_vlan_tagged)
#define IS_VLAN_ARP(skb) \
(vlan_proto(skb) == htons(ETH_P_ARP) && \
brnf_filter_vlan_tagged)
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
sizeof(struct pppoe_hdr)));
}
#define IS_PPPOE_IP(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IP) && \
brnf_filter_pppoe_tagged)
#define IS_PPPOE_IPV6(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
};
/*
* Initialize bogus route table used to keep netfilter happy.
* Currently, we fill in the PMTU entry because netfilter
* refragmentation needs it, and the rt_flags entry because
* ipt_REJECT needs it. Future netfilter modules might
* require us to fill additional fields.
*/
void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
atomic_set(&rt->dst.__refcnt, 1);
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_metric_set(&rt->dst, RTAX_MTU, 1500);
rt->dst.flags = DST_NOXFRM;
rt->dst.ops = &fake_dst_ops;
}
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port;
port = br_port_get_rcu(dev);
return port ? &port->br->fake_rtable : NULL;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
struct net_bridge_port *port;
port = br_port_get_rcu(dev);
return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
if (likely(skb->nf_bridge))
atomic_set(&(skb->nf_bridge->use), 1);
return skb->nf_bridge;
}
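/*
 * Get a writable nf_bridge_info. If the info block is shared (use count
 * above one), allocate a private copy for this skb and drop the reference
 * on the shared one; returns NULL on allocation failure, in which case the
 * caller drops the packet.
 */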
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (atomic_read(&nf_bridge->use) > 1) {
struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
if (tmp) {
memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
atomic_set(&tmp->use, 1);
}
nf_bridge_put(nf_bridge);
nf_bridge = tmp;
}
return nf_bridge;
}
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_push(skb, len);
skb->network_header -= len;
}
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull(skb, len);
skb->network_header += len;
}
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull_rcsum(skb, len);
skb->network_header += len;
}
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
skb_copy_from_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
if (skb->nf_bridge->mask & BRNF_8021Q)
skb->protocol = htons(ETH_P_8021Q);
else if (skb->nf_bridge->mask & BRNF_PPPoE)
skb->protocol = htons(ETH_P_PPP_SES);
}
/* When handing a packet over to the IP layer,
* check whether we have a skb that is in the
* expected format.
*/
static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Zero out the CB buffer if no options present */
if (iph->ihl == 5) {
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return 0;
}
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
/* Fill in the header for fragmented IP packets handled by
* the IPv4 connection tracking code.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
unsigned int header_size;
nf_bridge_update_protocol(skb);
header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return err;
skb_copy_to_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
__skb_push(skb, nf_bridge_encap_header_len(skb));
return 0;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
* bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_set_noref(skb, &rt->dst);
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
/* Obtain the correct destination MAC address, while preserving the original
* source MAC address. If we already know this address, we just copy it. If we
* don't, we use the neighbour framework to find out. In both cases, we make
* sure that br_handle_frame_finish() is called afterwards.
*/
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct dst_entry *dst;
skb->dev = bridge_parent(skb->dev);
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
if (dst->hh) {
neigh_hh_bridge(dst->hh, skb);
skb->dev = nf_bridge->physindev;
return br_handle_frame_finish(skb);
} else if (dst->neighbour) {
/* the neighbour function below overwrites the complete
* MAC header, so we save the Ethernet source address and
* protocol number. */
skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->mask |= BRNF_BRIDGED_DNAT;
return dst->neighbour->output(skb);
}
free_skb:
kfree_skb(skb);
return 0;
}
/* This requires some explaining. If DNAT has taken place,
* we will need to fix up the destination Ethernet address.
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
* port group as it was received on. We can still bridge
* the packet.
* 2. The packet was DNAT'ed to a different device, either
* a non-bridged device or another bridge port group.
* The packet will need to be routed.
*
* The correct way of distinguishing between these two cases is to
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
* Let's first consider the case that ip_route_input() succeeds:
*
* If the output device equals the logical bridge device the packet
* came in on, we can consider this bridging. The corresponding MAC
* address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
* Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
* If IP forwarding is disabled, ip_route_input() will fail, while
* ip_route_output_key() can return success. The source
* address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
* if IP forwarding is enabled. If the output device equals the logical bridge
* device, we proceed as if ip_route_input() succeeded. If it differs from the
* logical bridge port or if ip_route_output_key() fails we drop the packet.
*/
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
int err;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
if (dnat_took_place(skb)) {
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
/* If err equals -EHOSTUNREACH the error is due to a
* martian destination or due to the fact that
* forwarding is disabled. For most martian packets,
* ip_route_output_key() will fail. It won't fail for 2 types of
* martian destinations: loopback destinations and destination
* 0.0.0.0. In both cases the packet will be dropped because the
* destination is the loopback device and not the bridge. */
if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
goto free_skb;
rt = ip_route_output(dev_net(dev), iph->daddr, 0,
RT_TOS(iph->tos), 0);
if (!IS_ERR(rt)) {
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
ip_rt_put(rt);
}
free_skb:
kfree_skb(skb);
return 0;
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE,
NF_BR_PRE_ROUTING,
skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge,
1);
return 0;
}
memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
skb->pkt_type = PACKET_HOST;
}
} else {
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_set_noref(skb, &rt->dst);
}
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
if (skb->protocol == htons(ETH_P_8021Q))
nf_bridge->mask |= BRNF_8021Q;
else if (skb->protocol == htons(ETH_P_PPP_SES))
nf_bridge->mask |= BRNF_PPPoE;
return skb->dev;
}
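/*
 * Hop-by-hop options follow the fixed IPv6 header as (type, length, value)
 * triples: the PAD0 option is a single zero byte, PADN and other options
 * carry a length byte, and the Jumbo payload option supplies the real
 * packet length when payload_len is zero. check_hbh_len() below only walks
 * far enough to validate those lengths.
 */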
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
u32 pkt_len;
const unsigned char *nh = skb_network_header(skb);
int off = raw - nh;
int len = (raw[1] + 1) << 3;
if ((raw + len) - skb->data > skb_headlen(skb))
goto bad;
off += 2;
len -= 2;
while (len > 0) {
int optlen = nh[off + 1] + 2;
switch (nh[off]) {
case IPV6_TLV_PAD0:
optlen = 1;
break;
case IPV6_TLV_PADN:
break;
case IPV6_TLV_JUMBO:
if (nh[off + 1] != 4 || (off & 3) != 2)
goto bad;
pkt_len = ntohl(*(__be32 *) (nh + off + 2));
if (pkt_len <= IPV6_MAXPLEN ||
ipv6_hdr(skb)->payload_len)
goto bad;
if (pkt_len > skb->len - sizeof(struct ipv6hdr))
goto bad;
if (pskb_trim_rcsum(skb,
pkt_len + sizeof(struct ipv6hdr)))
goto bad;
nh = skb_network_header(skb);
break;
default:
if (optlen > len)
goto bad;
break;
}
off += optlen;
len -= optlen;
}
if (len == 0)
return 0;
bad:
return -1;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
* to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (skb->len < sizeof(struct ipv6hdr))
return NF_DROP;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return NF_DROP;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
return NF_DROP;
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
return NF_DROP;
}
if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
return NF_DROP;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
* Replicate the checks that IPv4 does on packet reception.
* Set skb->dev to the bridge device (i.e. parent of the
* receiving device) to make netfilter happy, the REDIRECT
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct net_bridge_port *p;
struct net_bridge *br;
__u32 len = nf_bridge_encap_header_len(skb);
if (unlikely(!pskb_may_pull(skb, len)))
return NF_DROP;
p = br_port_get_rcu(in);
if (p == NULL)
return NF_DROP;
br = p->br;
if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
}
if (!brnf_call_iptables && !br->nf_call_iptables)
return NF_ACCEPT;
if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
!IS_PPPOE_IP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
if (br_parse_ip_options(skb))
return NF_DROP;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
store_orig_dstaddr(skb);
skb->protocol = htons(ETH_P_IP);
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
* dst_entry, so detach the fake one. On the way up, the
* packet would pass through PRE_ROUTING again (which already
* took place when the packet entered the bridge), but we
* register an IPv4 PRE_ROUTING 'sabotage' hook that will
* prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct rtable *rt = skb_rtable(skb);
if (rt && rt == bridge_parent_rtable(in))
skb_dst_drop(skb);
return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *in;
if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge_update_protocol(skb);
} else {
in = *((struct net_device **)(skb->cb));
}
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
skb->dev, br_forward_finish, 1);
return 0;
}
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
* because of the physdev module. For ARP, indev and outdev are the
* bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_bridge_info *nf_bridge;
struct net_device *parent;
u_int8_t pf;
if (!skb->nf_bridge)
return NF_ACCEPT;
/* Need exclusive nf_bridge_info since we might have multiple
* different physoutdevs. */
if (!nf_bridge_unshare(skb))
return NF_DROP;
parent = bridge_parent(out);
if (!parent)
return NF_DROP;
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
nf_bridge = skb->nf_bridge;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
if (br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
if (pf == PF_INET)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
br_nf_forward_finish);
return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct net_bridge_port *p;
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
p = br_port_get_rcu(out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
if (!brnf_call_arptables && !br->nf_call_arptables)
return NF_ACCEPT;
if (skb->protocol != htons(ETH_P_ARP)) {
if (!IS_VLAN_ARP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
}
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);
return NF_ACCEPT;
}
*d = (struct net_device *)in;
NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
(struct net_device *)out, br_nf_forward_finish);
return NF_STOLEN;
}
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
int ret;
if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb)) {
if (br_parse_ip_options(skb))
/* Drop invalid packet */
return NF_DROP;
ret = ip_fragment(skb, br_dev_queue_push_xmit);
} else
ret = br_dev_queue_push_xmit(skb);
return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
return br_dev_queue_push_xmit(skb);
}
#endif
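/*
 * With IPv4 connection tracking built in, oversized non-GSO IP packets are
 * refragmented here before transmit, so frames reassembled by conntrack are
 * not dropped on bridge ports with a smaller MTU; without conntrack the
 * packet is simply queued for transmit unchanged.
 */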
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
return NF_ACCEPT;
if (!realoutdev)
return NF_DROP;
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
* about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
if (pf == PF_INET)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
* for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
if (skb->nf_bridge &&
!(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
return NF_STOP;
}
return NF_ACCEPT;
}
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_local_in,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1,
},
{
.hook = br_nf_forward_arp,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_LAST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_FIRST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
},
};
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
return ret;
}
static ctl_table brnf_table[] = {
{
.procname = "bridge-nf-call-arptables",
.data = &brnf_call_arptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-iptables",
.data = &brnf_call_iptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-ip6tables",
.data = &brnf_call_ip6tables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-vlan-tagged",
.data = &brnf_filter_vlan_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-pppoe-tagged",
.data = &brnf_filter_pppoe_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{ }
};
static struct ctl_path brnf_path[] = {
{ .procname = "net", },
{ .procname = "bridge", },
{ }
};
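/*
 * Registering brnf_table under the "net"/"bridge" path above exposes these
 * switches as /proc/sys/net/bridge/bridge-nf-call-iptables and friends
 * (net.bridge.* via sysctl), with defaults taken from the brnf_* variables
 * defined at the top of this file.
 */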
#endif
int __init br_netfilter_init(void)
{
int ret;
ret = dst_entries_init(&fake_dst_ops);
if (ret < 0)
return ret;
ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
if (ret < 0) {
dst_entries_destroy(&fake_dst_ops);
return ret;
}
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
dst_entries_destroy(&fake_dst_ops);
return -ENOMEM;
}
#endif
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
}
void br_netfilter_fini(void)
{
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(brnf_sysctl_header);
#endif
dst_entries_destroy(&fake_dst_ops);
}
| /*
* Handle firewalling
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
* Bart De Schuymer <bdschuym@pandora.be>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Lennert dedicates this file to Kerstin Wurdinger.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#define skb_origaddr(skb) (((struct bridge_skb_cb *) \
(skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
return skb->protocol;
else if (skb->protocol == htons(ETH_P_8021Q))
return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
else
return 0;
}
#define IS_VLAN_IP(skb) \
(vlan_proto(skb) == htons(ETH_P_IP) && \
brnf_filter_vlan_tagged)
#define IS_VLAN_IPV6(skb) \
(vlan_proto(skb) == htons(ETH_P_IPV6) && \
brnf_filter_vlan_tagged)
#define IS_VLAN_ARP(skb) \
(vlan_proto(skb) == htons(ETH_P_ARP) && \
brnf_filter_vlan_tagged)
static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
sizeof(struct pppoe_hdr)));
}
#define IS_PPPOE_IP(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IP) && \
brnf_filter_pppoe_tagged)
#define IS_PPPOE_IPV6(skb) \
(skb->protocol == htons(ETH_P_PPP_SES) && \
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
};
/*
* Initialize bogus route table used to keep netfilter happy.
* Currently, we fill in the PMTU entry because netfilter
* refragmentation needs it, and the rt_flags entry because
* ipt_REJECT needs it. Future netfilter modules might
* require us to fill additional fields.
*/
void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
atomic_set(&rt->dst.__refcnt, 1);
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_metric_set(&rt->dst, RTAX_MTU, 1500);
rt->dst.flags = DST_NOXFRM;
rt->dst.ops = &fake_dst_ops;
}
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port;
port = br_port_get_rcu(dev);
return port ? &port->br->fake_rtable : NULL;
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
struct net_bridge_port *port;
port = br_port_get_rcu(dev);
return port ? port->br->dev : NULL;
}
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
if (likely(skb->nf_bridge))
atomic_set(&(skb->nf_bridge->use), 1);
return skb->nf_bridge;
}
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (atomic_read(&nf_bridge->use) > 1) {
struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
if (tmp) {
memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
atomic_set(&tmp->use, 1);
}
nf_bridge_put(nf_bridge);
nf_bridge = tmp;
}
return nf_bridge;
}
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_push(skb, len);
skb->network_header -= len;
}
static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull(skb, len);
skb->network_header += len;
}
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
unsigned int len = nf_bridge_encap_header_len(skb);
skb_pull_rcsum(skb, len);
skb->network_header += len;
}
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
skb_copy_from_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
}
static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
if (skb->nf_bridge->mask & BRNF_8021Q)
skb->protocol = htons(ETH_P_8021Q);
else if (skb->nf_bridge->mask & BRNF_PPPoE)
skb->protocol = htons(ETH_P_PPP_SES);
}
/* When handing a packet over to the IP layer,
* check whether we have a skb that is in the
* expected format.
*/
static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (iph->ihl == 5)
return 0;
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
/* Fill in the header for fragmented IP packets handled by
* the IPv4 connection tracking code.
*/
int nf_bridge_copy_header(struct sk_buff *skb)
{
int err;
unsigned int header_size;
nf_bridge_update_protocol(skb);
header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
err = skb_cow_head(skb, header_size);
if (err)
return err;
skb_copy_to_linear_data_offset(skb, -header_size,
skb->nf_bridge->data, header_size);
__skb_push(skb, nf_bridge_encap_header_len(skb));
return 0;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
* bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_set_noref(skb, &rt->dst);
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
/* Obtain the correct destination MAC address, while preserving the original
* source MAC address. If we already know this address, we just copy it. If we
* don't, we use the neighbour framework to find out. In both cases, we make
* sure that br_handle_frame_finish() is called afterwards.
*/
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct dst_entry *dst;
skb->dev = bridge_parent(skb->dev);
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
if (dst->hh) {
neigh_hh_bridge(dst->hh, skb);
skb->dev = nf_bridge->physindev;
return br_handle_frame_finish(skb);
} else if (dst->neighbour) {
/* the neighbour function below overwrites the complete
* MAC header, so we save the Ethernet source address and
* protocol number. */
skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->mask |= BRNF_BRIDGED_DNAT;
return dst->neighbour->output(skb);
}
free_skb:
kfree_skb(skb);
return 0;
}
/* This requires some explaining. If DNAT has taken place,
* we will need to fix up the destination Ethernet address.
*
* There are two cases to consider:
* 1. The packet was DNAT'ed to a device in the same bridge
* port group as it was received on. We can still bridge
* the packet.
* 2. The packet was DNAT'ed to a different device, either
* a non-bridged device or another bridge port group.
* The packet will need to be routed.
*
* The correct way of distinguishing between these two cases is to
* call ip_route_input() and to look at skb->dst->dev, which is
* changed to the destination device if ip_route_input() succeeds.
*
* Let's first consider the case that ip_route_input() succeeds:
*
* If the output device equals the logical bridge device the packet
* came in on, we can consider this bridging. The corresponding MAC
* address will be obtained in br_nf_pre_routing_finish_bridge.
* Otherwise, the packet is considered to be routed and we just
* change the destination MAC address so that the packet will
* later be passed up to the IP stack to be routed. For a redirected
* packet, ip_route_input() will give back the localhost as output device,
* which differs from the bridge device.
*
* Let's now consider the case that ip_route_input() fails:
*
* This can be because the destination address is martian, in which case
* the packet will be dropped.
* If IP forwarding is disabled, ip_route_input() will fail, while
* ip_route_output_key() can return success. The source
* address for ip_route_output_key() is set to zero, so ip_route_output_key()
* thinks we're handling a locally generated packet and won't care
* if IP forwarding is enabled. If the output device equals the logical bridge
* device, we proceed as if ip_route_input() succeeded. If it differs from the
* logical bridge port or if ip_route_output_key() fails we drop the packet.
*/
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
int err;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
if (dnat_took_place(skb)) {
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
/* If err equals -EHOSTUNREACH the error is due to a
* martian destination or due to the fact that
* forwarding is disabled. For most martian packets,
* ip_route_output_key() will fail. It won't fail for 2 types of
* martian destinations: loopback destinations and destination
* 0.0.0.0. In both cases the packet will be dropped because the
* destination is the loopback device and not the bridge. */
if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
goto free_skb;
rt = ip_route_output(dev_net(dev), iph->daddr, 0,
RT_TOS(iph->tos), 0);
if (!IS_ERR(rt)) {
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
ip_rt_put(rt);
}
free_skb:
kfree_skb(skb);
return 0;
} else {
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE,
NF_BR_PRE_ROUTING,
skb, skb->dev, NULL,
br_nf_pre_routing_finish_bridge,
1);
return 0;
}
memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
skb->pkt_type = PACKET_HOST;
}
} else {
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
skb_dst_set_noref(skb, &rt->dst);
}
skb->dev = nf_bridge->physindev;
nf_bridge_update_protocol(skb);
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
br_handle_frame_finish, 1);
return 0;
}
/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
if (skb->protocol == htons(ETH_P_8021Q))
nf_bridge->mask |= BRNF_8021Q;
else if (skb->protocol == htons(ETH_P_PPP_SES))
nf_bridge->mask |= BRNF_PPPoE;
return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
u32 pkt_len;
const unsigned char *nh = skb_network_header(skb);
int off = raw - nh;
int len = (raw[1] + 1) << 3;
if ((raw + len) - skb->data > skb_headlen(skb))
goto bad;
off += 2;
len -= 2;
while (len > 0) {
int optlen = nh[off + 1] + 2;
switch (nh[off]) {
case IPV6_TLV_PAD0:
optlen = 1;
break;
case IPV6_TLV_PADN:
break;
case IPV6_TLV_JUMBO:
if (nh[off + 1] != 4 || (off & 3) != 2)
goto bad;
pkt_len = ntohl(*(__be32 *) (nh + off + 2));
if (pkt_len <= IPV6_MAXPLEN ||
ipv6_hdr(skb)->payload_len)
goto bad;
if (pkt_len > skb->len - sizeof(struct ipv6hdr))
goto bad;
if (pskb_trim_rcsum(skb,
pkt_len + sizeof(struct ipv6hdr)))
goto bad;
nh = skb_network_header(skb);
break;
default:
if (optlen > len)
goto bad;
break;
}
off += optlen;
len -= optlen;
}
if (len == 0)
return 0;
bad:
return -1;
}
/* Replicate the checks that IPv6 does on packet reception and pass the packet
* to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct ipv6hdr *hdr;
u32 pkt_len;
if (skb->len < sizeof(struct ipv6hdr))
return NF_DROP;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
return NF_DROP;
pkt_len = ntohs(hdr->payload_len);
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
return NF_DROP;
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
return NF_DROP;
}
if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
return NF_DROP;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
return NF_STOLEN;
}
/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
* Replicate the checks that IPv4 does on packet reception.
* Set skb->dev to the bridge device (i.e. parent of the
* receiving device) to make netfilter happy, the REDIRECT
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct net_bridge_port *p;
struct net_bridge *br;
__u32 len = nf_bridge_encap_header_len(skb);
if (unlikely(!pskb_may_pull(skb, len)))
return NF_DROP;
p = br_port_get_rcu(in);
if (p == NULL)
return NF_DROP;
br = p->br;
if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
}
if (!brnf_call_iptables && !br->nf_call_iptables)
return NF_ACCEPT;
if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
!IS_PPPOE_IP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
if (br_parse_ip_options(skb))
return NF_DROP;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
store_orig_dstaddr(skb);
skb->protocol = htons(ETH_P_IP);
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish);
return NF_STOLEN;
}
/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
* dst_entry, so detach the fake one. On the way up, the
* packet would pass through PRE_ROUTING again (which already
* took place when the packet entered the bridge), but we
* register an IPv4 PRE_ROUTING 'sabotage' hook that will
* prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct rtable *rt = skb_rtable(skb);
if (rt && rt == bridge_parent_rtable(in))
skb_dst_drop(skb);
return NF_ACCEPT;
}
/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *in;
if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
nf_bridge->mask ^= BRNF_PKT_TYPE;
}
nf_bridge_update_protocol(skb);
} else {
in = *((struct net_device **)(skb->cb));
}
nf_bridge_push_encap_header(skb);
NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
skb->dev, br_forward_finish, 1);
return 0;
}
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
* because of the physdev module. For ARP, indev and outdev are the
* bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_bridge_info *nf_bridge;
struct net_device *parent;
u_int8_t pf;
if (!skb->nf_bridge)
return NF_ACCEPT;
/* Need exclusive nf_bridge_info since we might have multiple
* different physoutdevs. */
if (!nf_bridge_unshare(skb))
return NF_DROP;
parent = bridge_parent(out);
if (!parent)
return NF_DROP;
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
nf_bridge = skb->nf_bridge;
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
if (br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
if (pf == PF_INET)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
br_nf_forward_finish);
return NF_STOLEN;
}
static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct net_bridge_port *p;
struct net_bridge *br;
struct net_device **d = (struct net_device **)(skb->cb);
p = br_port_get_rcu(out);
if (p == NULL)
return NF_ACCEPT;
br = p->br;
if (!brnf_call_arptables && !br->nf_call_arptables)
return NF_ACCEPT;
if (skb->protocol != htons(ETH_P_ARP)) {
if (!IS_VLAN_ARP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
}
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);
return NF_ACCEPT;
}
*d = (struct net_device *)in;
NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
(struct net_device *)out, br_nf_forward_finish);
return NF_STOLEN;
}
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
int ret;
if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
!skb_is_gso(skb)) {
if (br_parse_ip_options(skb))
/* Drop invalid packet */
return NF_DROP;
ret = ip_fragment(skb, br_dev_queue_push_xmit);
} else
ret = br_dev_queue_push_xmit(skb);
return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
return br_dev_queue_push_xmit(skb);
}
#endif
/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *realoutdev = bridge_parent(skb->dev);
u_int8_t pf;
if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
return NF_ACCEPT;
if (!realoutdev)
return NF_DROP;
if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
IS_PPPOE_IP(skb))
pf = PF_INET;
else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
* about the value of skb->pkt_type. */
if (skb->pkt_type == PACKET_OTHERHOST) {
skb->pkt_type = PACKET_HOST;
nf_bridge->mask |= BRNF_PKT_TYPE;
}
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
if (pf == PF_INET)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
br_nf_dev_queue_xmit);
return NF_STOLEN;
}
/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
* for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
if (skb->nf_bridge &&
!(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
return NF_STOP;
}
return NF_ACCEPT;
}
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_local_in,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1,
},
{
.hook = br_nf_forward_arp,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_LAST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
.pf = PF_INET,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_FIRST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
},
};
#ifdef CONFIG_SYSCTL
static
int brnf_sysctl_call_tables(ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *(int *)(ctl->data))
*(int *)(ctl->data) = 1;
return ret;
}
static ctl_table brnf_table[] = {
{
.procname = "bridge-nf-call-arptables",
.data = &brnf_call_arptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-iptables",
.data = &brnf_call_iptables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-call-ip6tables",
.data = &brnf_call_ip6tables,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-vlan-tagged",
.data = &brnf_filter_vlan_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{
.procname = "bridge-nf-filter-pppoe-tagged",
.data = &brnf_filter_pppoe_tagged,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = brnf_sysctl_call_tables,
},
{ }
};
static struct ctl_path brnf_path[] = {
{ .procname = "net", },
{ .procname = "bridge", },
{ }
};
#endif
int __init br_netfilter_init(void)
{
int ret;
ret = dst_entries_init(&fake_dst_ops);
if (ret < 0)
return ret;
ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
if (ret < 0) {
dst_entries_destroy(&fake_dst_ops);
return ret;
}
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
dst_entries_destroy(&fake_dst_ops);
return -ENOMEM;
}
#endif
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
}
void br_netfilter_fini(void)
{
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(brnf_sysctl_header);
#endif
dst_entries_destroy(&fake_dst_ops);
}
|
161,026,738,622,539 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for inet_sock
*
* Authors: Many, reorganised here by
* Arnaldo Carvalho de Melo <acme@mandriva.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _INET_SOCK_H
#define _INET_SOCK_H
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/netdevice.h>
#include <net/flow.h>
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was ours
* @is_changed - IP checksum no longer valid
* @rr_needaddr - Need to record addr of outgoing dev
* @ts_needtime - Need to record timestamp
* @ts_needaddr - Need to record addr of outgoing dev
*/
struct ip_options {
__be32 faddr;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
unsigned char ts;
unsigned char is_strictroute:1,
srr_is_hit:1,
is_changed:1,
rr_needaddr:1,
ts_needtime:1,
ts_needaddr:1;
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
unsigned char __data[0];
};
#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
struct inet_request_sock {
struct request_sock req;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u16 inet6_rsk_offset;
#endif
__be16 loc_port;
__be32 loc_addr;
__be32 rmt_addr;
__be16 rmt_port;
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
struct ip_options *opt;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
return (struct inet_request_sock *)sk;
}
struct inet_cork {
unsigned int flags;
unsigned int fragsize;
struct ip_options *opt;
struct dst_entry *dst;
int length; /* Total length of all frames */
__be32 addr;
struct flowi fl;
struct page *page;
u32 off;
u8 tx_flags;
};
struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
/** struct inet_sock - representation of INET sockets
*
* @sk - ancestor class
* @pinet6 - pointer to IPv6 control block
* @inet_daddr - Foreign IPv4 addr
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
* @inet_saddr - Sending source
* @uc_ttl - Unicast TTL
* @inet_sport - Source port
* @inet_id - ID counter for DF pkts
* @tos - TOS
* @mc_ttl - Multicasting TTL
* @is_icsk - is this an inet_connection_sock?
* @mc_index - Multicast device index
* @mc_list - Group array
* @cork - info to build ip hdr on each ip frag while socket is corked
*/
struct inet_sock {
/* sk and pinet6 have to be the first two members of inet_sock */
struct sock sk;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct ipv6_pinfo *pinet6;
#endif
/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
__be16 inet_dport;
__u16 inet_num;
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
__be16 inet_sport;
__u16 inet_id;
struct ip_options *opt;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
__u8 recverr:1,
is_icsk:1,
freebind:1,
hdrincl:1,
mc_loop:1,
transparent:1,
mc_all:1,
nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork cork;
};
#define IPCORK_OPT 1 /* ip options are held in ipcork.opt */
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
}
static inline void __inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from,
const int ancestor_size)
{
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
static inline void inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
__inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
}
#endif
extern int inet_sk_rebuild_header(struct sock *sk);
extern u32 inet_ehash_secret;
extern void build_ehash_secret(void);
static inline unsigned int inet_ehashfn(struct net *net,
const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
return jhash_3words((__force __u32) laddr,
(__force __u32) faddr,
((__u32) lport) << 16 | (__force __u32)fport,
inet_ehash_secret + net_hash_mix(net));
}
static inline int inet_sk_ehashfn(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const __be32 laddr = inet->inet_rcv_saddr;
const __u16 lport = inet->inet_num;
const __be32 faddr = inet->inet_daddr;
const __be16 fport = inet->inet_dport;
struct net *net = sock_net(sk);
return inet_ehashfn(net, laddr, lport, faddr, fport);
}
static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
struct inet_request_sock *ireq = inet_rsk(req);
if (req != NULL) {
kmemcheck_annotate_bitfield(ireq, flags);
ireq->opt = NULL;
}
return req;
}
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;
if (inet_sk(sk)->transparent)
flags |= FLOWI_FLAG_ANYSRC;
if (sk->sk_protocol == IPPROTO_TCP)
flags |= FLOWI_FLAG_PRECOW_METRICS;
return flags;
}
#endif /* _INET_SOCK_H */
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for inet_sock
*
* Authors: Many, reorganised here by
* Arnaldo Carvalho de Melo <acme@mandriva.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _INET_SOCK_H
#define _INET_SOCK_H
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/netdevice.h>
#include <net/flow.h>
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was ours
* @is_changed - IP checksum no longer valid
* @rr_needaddr - Need to record addr of outgoing dev
* @ts_needtime - Need to record timestamp
* @ts_needaddr - Need to record addr of outgoing dev
*/
struct ip_options {
__be32 faddr;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
unsigned char ts;
unsigned char is_strictroute:1,
srr_is_hit:1,
is_changed:1,
rr_needaddr:1,
ts_needtime:1,
ts_needaddr:1;
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
unsigned char __data[0];
};
struct ip_options_rcu {
struct rcu_head rcu;
struct ip_options opt;
};
struct ip_options_data {
struct ip_options_rcu opt;
char data[40];
};
struct inet_request_sock {
struct request_sock req;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u16 inet6_rsk_offset;
#endif
__be16 loc_port;
__be32 loc_addr;
__be32 rmt_addr;
__be16 rmt_port;
kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1;
kmemcheck_bitfield_end(flags);
struct ip_options_rcu *opt;
};
static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
return (struct inet_request_sock *)sk;
}
struct inet_cork {
unsigned int flags;
unsigned int fragsize;
struct ip_options *opt;
struct dst_entry *dst;
int length; /* Total length of all frames */
__be32 addr;
struct flowi fl;
struct page *page;
u32 off;
u8 tx_flags;
};
struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
/** struct inet_sock - representation of INET sockets
*
* @sk - ancestor class
* @pinet6 - pointer to IPv6 control block
* @inet_daddr - Foreign IPv4 addr
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
* @inet_saddr - Sending source
* @uc_ttl - Unicast TTL
* @inet_sport - Source port
* @inet_id - ID counter for DF pkts
* @tos - TOS
* @mc_ttl - Multicasting TTL
* @is_icsk - is this an inet_connection_sock?
* @mc_index - Multicast device index
* @mc_list - Group array
* @cork - info to build ip hdr on each ip frag while socket is corked
*/
struct inet_sock {
/* sk and pinet6 have to be the first two members of inet_sock */
struct sock sk;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct ipv6_pinfo *pinet6;
#endif
/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr sk.__sk_common.skc_daddr
#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr
__be16 inet_dport;
__u16 inet_num;
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
__be16 inet_sport;
__u16 inet_id;
struct ip_options_rcu __rcu *inet_opt;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
__u8 recverr:1,
is_icsk:1,
freebind:1,
hdrincl:1,
mc_loop:1,
transparent:1,
mc_all:1,
nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork cork;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
static inline struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
}
static inline void __inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from,
const int ancestor_size)
{
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
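/*
 * Editor's note: __inet_sk_copy_descendant() above copies only the bytes a
 * protocol-specific socket appends *after* struct inet_sock, i.e. the
 * "derived class" state, leaving the shared base alone. A reduced userspace
 * analogue follows; the struct and function names (demo_base, demo_child,
 * copy_descendant) are invented for this sketch only.
 */
#include <stdio.h>
#include <string.h>
struct demo_base  { int id; };
struct demo_child { struct demo_base b; int extra[4]; };	/* base must come first */
static void copy_descendant(struct demo_base *to, const struct demo_base *from,
			    size_t obj_size, size_t ancestor_size)
{
	/* Copy everything past the ancestor, just like the kernel helper. */
	memcpy((char *)to + ancestor_size, (const char *)from + ancestor_size,
	       obj_size - ancestor_size);
}
int main(void)
{
	struct demo_child a = { { 1 }, { 10, 20, 30, 40 } };
	struct demo_child b = { { 2 }, { 0, 0, 0, 0 } };
	copy_descendant(&b.b, &a.b, sizeof(struct demo_child), sizeof(struct demo_base));
	/* b keeps its own base (id == 2) but inherits a's descendant data. */
	printf("b.id=%d b.extra[0]=%d\n", b.b.id, b.extra[0]);
	return 0;
}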
#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
static inline void inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
__inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
}
#endif
extern int inet_sk_rebuild_header(struct sock *sk);
extern u32 inet_ehash_secret;
extern void build_ehash_secret(void);
static inline unsigned int inet_ehashfn(struct net *net,
const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
return jhash_3words((__force __u32) laddr,
(__force __u32) faddr,
((__u32) lport) << 16 | (__force __u32)fport,
inet_ehash_secret + net_hash_mix(net));
}
static inline int inet_sk_ehashfn(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const __be32 laddr = inet->inet_rcv_saddr;
const __u16 lport = inet->inet_num;
const __be32 faddr = inet->inet_daddr;
const __be16 fport = inet->inet_dport;
struct net *net = sock_net(sk);
return inet_ehashfn(net, laddr, lport, faddr, fport);
}
static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
struct inet_request_sock *ireq = inet_rsk(req);
if (req != NULL) {
kmemcheck_annotate_bitfield(ireq, flags);
ireq->opt = NULL;
}
return req;
}
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;
if (inet_sk(sk)->transparent)
flags |= FLOWI_FLAG_ANYSRC;
if (sk->sk_protocol == IPPROTO_TCP)
flags |= FLOWI_FLAG_PRECOW_METRICS;
return flags;
}
#endif /* _INET_SOCK_H */
|
87,450,029,682,028 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the IP module.
*
* Version: @(#)ip.h 1.0.2 05/07/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
*
* Changes:
* Mike McLagan : Routing by source
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _IP_H
#define _IP_H
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/inet_sock.h>
#include <net/snmp.h>
#include <net/flow.h>
struct sock;
struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
#define IPSKB_FORWARDED 1
#define IPSKB_XFRM_TUNNEL_SIZE 2
#define IPSKB_XFRM_TRANSFORMED 4
#define IPSKB_FRAG_COMPLETE 8
#define IPSKB_REROUTED 16
};
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
}
struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options *opt;
__u8 tx_flags;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
struct ip_ra_chain {
struct ip_ra_chain __rcu *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
struct sock *saved_sk;
};
struct rcu_head rcu;
};
extern struct ip_ra_chain __rcu *ip_ra_chain;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
#define IP_MF 0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;
extern int igmp_mc_proc_init(void);
/*
* Functions provided by ip.c
*/
extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr,
struct ip_options *opt);
extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
extern int ip_local_deliver(struct sk_buff *skb);
extern int ip_mr_input(struct sk_buff *skb);
extern int ip_output(struct sk_buff *skb);
extern int ip_mc_output(struct sk_buff *skb);
extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
extern int __ip_local_out(struct sk_buff *skb);
extern int ip_local_out(struct sk_buff *skb);
extern int ip_queue_xmit(struct sk_buff *skb);
extern void ip_init(void);
extern int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int len, int protolen,
struct ipcm_cookie *ipc,
struct rtable **rt,
unsigned int flags);
extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
extern ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
extern struct sk_buff *__ip_make_skb(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork);
extern int ip_send_skb(struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
extern struct sk_buff *ip_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc,
struct rtable **rtp,
unsigned int flags);
static inline struct sk_buff *ip_finish_skb(struct sock *sk)
{
return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
/* datagram.c */
extern int ip4_datagram_connect(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
/*
* Map a multicast IP onto multicast MAC for type Token Ring.
* This conforms to RFC1469 Option 2 Multicasting i.e.
* using a functional address to transmit / receive
* multicast packets.
*/
static inline void ip_tr_mc_map(__be32 addr, char *buf)
{
buf[0]=0xC0;
buf[1]=0x00;
buf[2]=0x00;
buf[3]=0x04;
buf[4]=0x00;
buf[5]=0x00;
}
struct ip_reply_arg {
struct kvec iov[1];
int flags;
__wsum csum;
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len);
struct ipv4_config {
int log_martians;
int no_pmtu_disc;
};
extern struct ipv4_config ipv4_config;
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
#if BITS_PER_LONG==32
extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
#else
static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
}
#endif
extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
extern void snmp_mib_free(void __percpu *ptr[2]);
extern struct local_ports {
seqlock_t lock;
int range[2];
} sysctl_local_ports;
extern void inet_get_local_port_range(int *low, int *high);
extern unsigned long *sysctl_local_reserved_ports;
static inline int inet_is_reserved_local_port(int port)
{
return test_bit(port, sysctl_local_reserved_ports);
}
extern int sysctl_ip_nonlocal_bind;
extern struct ctl_path net_core_path[];
extern struct ctl_path net_ipv4_ctl_path[];
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
extern int inet_peer_gc_mintime;
extern int inet_peer_gc_maxtime;
/* From ip_output.c */
extern int sysctl_ip_dynaddr;
extern void ipfrag_init(void);
extern void ip_static_sysctl_init(void);
#ifdef CONFIG_INET
#include <net/dst.h>
/* The function in 2.2 was invalid, producing wrong result for
* check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
u32 check = (__force u32)iph->check;
check += (__force u32)htons(0x0100);
iph->check = (__force __sum16)(check + (check>=0xFFFF));
return --iph->ttl;
}
static inline
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
{
return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
(inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
!(dst_metric_locked(dst, RTAX_MTU)));
}
extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
{
if (iph->frag_off & htons(IP_DF)) {
/* This is only to work around buggy Windows95/2000
* VJ compression implementations. If the ID field
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
iph->id = (sk && inet_sk(sk)->inet_daddr) ?
htons(inet_sk(sk)->inet_id++) : 0;
} else
__ip_select_ident(iph, dst, 0);
}
static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
{
if (iph->frag_off & htons(IP_DF)) {
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += 1 + more;
} else
iph->id = 0;
} else
__ip_select_ident(iph, dst, more);
}
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
__u32 addr=ntohl(naddr);
buf[0]=0x01;
buf[1]=0x00;
buf[2]=0x5e;
buf[5]=addr&0xFF;
addr>>=8;
buf[4]=addr&0xFF;
addr>>=8;
buf[3]=addr&0x7F;
}
/*
* Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
* Leave P_Key as 0 to be filled in by driver.
*/
static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
__u32 addr;
unsigned char scope = broadcast[5] & 0xF;
buf[0] = 0; /* Reserved */
buf[1] = 0xff; /* Multicast QPN */
buf[2] = 0xff;
buf[3] = 0xff;
addr = ntohl(naddr);
buf[4] = 0xff;
buf[5] = 0x10 | scope; /* scope from broadcast address */
buf[6] = 0x40; /* IPv4 signature */
buf[7] = 0x1b;
buf[8] = broadcast[8]; /* P_Key */
buf[9] = broadcast[9];
buf[10] = 0;
buf[11] = 0;
buf[12] = 0;
buf[13] = 0;
buf[14] = 0;
buf[15] = 0;
buf[19] = addr & 0xff;
addr >>= 8;
buf[18] = addr & 0xff;
addr >>= 8;
buf[17] = addr & 0xff;
addr >>= 8;
buf[16] = addr & 0x0f;
}
static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
memcpy(buf, broadcast, 4);
else
memcpy(buf, &naddr, sizeof(naddr));
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
static __inline__ void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
}
#endif
}
#endif
static inline int sk_mc_loop(struct sock *sk)
{
if (!sk)
return 1;
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
}
WARN_ON(1);
return 1;
}
extern int ip_call_ra_chain(struct sk_buff *skb);
/*
* Functions provided by ip_fragment.c
*/
enum ip_defrag_users {
IP_DEFRAG_LOCAL_DELIVER,
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP_DEFRAG_CONNTRACK_OUT,
__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD
};
int ip_defrag(struct sk_buff *skb, u32 user);
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
/*
* Functions provided by ip_forward.c
*/
extern int ip_forward(struct sk_buff *skb);
/*
* Functions provided by ip_options.c
*/
extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag);
extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
extern void ip_options_fragment(struct sk_buff *skb);
extern int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb);
extern int ip_options_get(struct net *net, struct ip_options **optp,
unsigned char *data, int optlen);
extern int ip_options_get_from_user(struct net *net, struct ip_options **optp,
unsigned char __user *data, int optlen);
extern void ip_options_undo(struct ip_options * opt);
extern void ip_forward_options(struct sk_buff *skb);
extern int ip_options_rcv_srr(struct sk_buff *skb);
/*
* Functions provided by ip_sockglue.c
*/
extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int ip_cmsg_send(struct net *net,
struct msghdr *msg, struct ipcm_cookie *ipc);
extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
extern int compat_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen);
extern int compat_ip_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen);
extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload);
extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
#ifdef CONFIG_PROC_FS
extern int ip_misc_proc_init(void);
#endif
#endif /* _IP_H */
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the IP module.
*
* Version: @(#)ip.h 1.0.2 05/07/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
*
* Changes:
* Mike McLagan : Routing by source
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _IP_H
#define _IP_H
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/inet_sock.h>
#include <net/snmp.h>
#include <net/flow.h>
struct sock;
struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
#define IPSKB_FORWARDED 1
#define IPSKB_XFRM_TUNNEL_SIZE 2
#define IPSKB_XFRM_TRANSFORMED 4
#define IPSKB_FRAG_COMPLETE 8
#define IPSKB_REROUTED 16
};
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
}
struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
__u8 tx_flags;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
struct ip_ra_chain {
struct ip_ra_chain __rcu *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
struct sock *saved_sk;
};
struct rcu_head rcu;
};
extern struct ip_ra_chain __rcu *ip_ra_chain;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */
#define IP_DF 0x4000 /* Flag: "Don't Fragment" */
#define IP_MF 0x2000 /* Flag: "More Fragments" */
#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
#define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;
extern int igmp_mc_proc_init(void);
/*
* Functions provided by ip.c
*/
extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr,
struct ip_options_rcu *opt);
extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
extern int ip_local_deliver(struct sk_buff *skb);
extern int ip_mr_input(struct sk_buff *skb);
extern int ip_output(struct sk_buff *skb);
extern int ip_mc_output(struct sk_buff *skb);
extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
extern int __ip_local_out(struct sk_buff *skb);
extern int ip_local_out(struct sk_buff *skb);
extern int ip_queue_xmit(struct sk_buff *skb);
extern void ip_init(void);
extern int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int len, int protolen,
struct ipcm_cookie *ipc,
struct rtable **rt,
unsigned int flags);
extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
extern ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
extern struct sk_buff *__ip_make_skb(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork);
extern int ip_send_skb(struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
extern struct sk_buff *ip_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc,
struct rtable **rtp,
unsigned int flags);
static inline struct sk_buff *ip_finish_skb(struct sock *sk)
{
return __ip_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
/* datagram.c */
extern int ip4_datagram_connect(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
/*
* Map a multicast IP onto multicast MAC for type Token Ring.
* This conforms to RFC1469 Option 2 Multicasting i.e.
* using a functional address to transmit / receive
* multicast packets.
*/
static inline void ip_tr_mc_map(__be32 addr, char *buf)
{
buf[0]=0xC0;
buf[1]=0x00;
buf[2]=0x00;
buf[3]=0x04;
buf[4]=0x00;
buf[5]=0x00;
}
struct ip_reply_arg {
struct kvec iov[1];
int flags;
__wsum csum;
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len);
struct ipv4_config {
int log_martians;
int no_pmtu_disc;
};
extern struct ipv4_config ipv4_config;
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
#if BITS_PER_LONG==32
extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
#else
static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
{
return snmp_fold_field(mib, offt);
}
#endif
extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
extern void snmp_mib_free(void __percpu *ptr[2]);
extern struct local_ports {
seqlock_t lock;
int range[2];
} sysctl_local_ports;
extern void inet_get_local_port_range(int *low, int *high);
extern unsigned long *sysctl_local_reserved_ports;
static inline int inet_is_reserved_local_port(int port)
{
return test_bit(port, sysctl_local_reserved_ports);
}
extern int sysctl_ip_nonlocal_bind;
extern struct ctl_path net_core_path[];
extern struct ctl_path net_ipv4_ctl_path[];
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
extern int inet_peer_gc_mintime;
extern int inet_peer_gc_maxtime;
/* From ip_output.c */
extern int sysctl_ip_dynaddr;
extern void ipfrag_init(void);
extern void ip_static_sysctl_init(void);
#ifdef CONFIG_INET
#include <net/dst.h>
/* The function in 2.2 was invalid, producing wrong result for
* check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
u32 check = (__force u32)iph->check;
check += (__force u32)htons(0x0100);
iph->check = (__force __sum16)(check + (check>=0xFFFF));
return --iph->ttl;
}
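/*
 * Editor's note: ip_decrease_ttl() above relies on the incremental checksum
 * update trick (RFC 1141/1624 style). Decrementing the TTL lowers the
 * TTL/protocol header word by 0x0100, so the stored checksum must be raised
 * by 0x0100 with the end-around carry folded back in. The userspace sketch
 * below (the helper name ip_full_csum is made up) cross-checks the
 * incremental update against a full recomputation.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
static uint16_t ip_full_csum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);	/* fold carries */
	return (uint16_t)~sum;
}
int main(void)
{
	/* A 20-byte IPv4 header, TTL=64, checksum field initially zero. */
	uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
			    0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
			    0xc0, 0xa8, 0x00, 0xc7 };
	uint8_t copy[20];
	uint16_t csum = ip_full_csum(hdr, sizeof(hdr));
	uint32_t check;
	hdr[10] = csum >> 8;
	hdr[11] = csum & 0xFF;
	/* Incremental update, mirroring ip_decrease_ttl(). */
	check = (uint32_t)(hdr[10] << 8 | hdr[11]);
	check += 0x0100;
	check = check + (check >= 0xFFFF);	/* end-around carry */
	hdr[8]--;				/* the actual TTL decrement */
	hdr[10] = (check >> 8) & 0xFF;
	hdr[11] = check & 0xFF;
	/* Cross-check against a from-scratch recomputation. */
	memcpy(copy, hdr, sizeof(hdr));
	copy[10] = copy[11] = 0;
	printf("incremental=0x%04x full=0x%04x\n",
	       hdr[10] << 8 | hdr[11], ip_full_csum(copy, sizeof(copy)));
	return 0;
}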
static inline
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
{
return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
(inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
!(dst_metric_locked(dst, RTAX_MTU)));
}
extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
{
if (iph->frag_off & htons(IP_DF)) {
/* This is only to work around buggy Windows95/2000
* VJ compression implementations. If the ID field
* does not change, they drop every other packet in
* a TCP stream using header compression.
*/
iph->id = (sk && inet_sk(sk)->inet_daddr) ?
htons(inet_sk(sk)->inet_id++) : 0;
} else
__ip_select_ident(iph, dst, 0);
}
static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
{
if (iph->frag_off & htons(IP_DF)) {
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += 1 + more;
} else
iph->id = 0;
} else
__ip_select_ident(iph, dst, more);
}
/*
* Map a multicast IP onto multicast MAC for type ethernet.
*/
static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
__u32 addr=ntohl(naddr);
buf[0]=0x01;
buf[1]=0x00;
buf[2]=0x5e;
buf[5]=addr&0xFF;
addr>>=8;
buf[4]=addr&0xFF;
addr>>=8;
buf[3]=addr&0x7F;
}
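/*
 * Editor's note: a standalone userspace sketch of the mapping ip_eth_mc_map()
 * performs -- the 01:00:5e Ethernet prefix followed by the low 23 bits of the
 * IPv4 group address. The function name eth_mc_map_demo and the sample group
 * 224.0.0.251 are chosen only for this illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
static void eth_mc_map_demo(uint32_t naddr /* network byte order */, unsigned char buf[6])
{
	uint32_t addr = ntohl(naddr);
	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[3] = (addr >> 16) & 0x7F;	/* only 23 bits of the group survive */
	buf[4] = (addr >> 8) & 0xFF;
	buf[5] = addr & 0xFF;
}
int main(void)
{
	unsigned char mac[6];
	eth_mc_map_demo(inet_addr("224.0.0.251"), mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);	/* 01:00:5e:00:00:fb */
	return 0;
}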
/*
* Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
* Leave P_Key as 0 to be filled in by driver.
*/
static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
__u32 addr;
unsigned char scope = broadcast[5] & 0xF;
buf[0] = 0; /* Reserved */
buf[1] = 0xff; /* Multicast QPN */
buf[2] = 0xff;
buf[3] = 0xff;
addr = ntohl(naddr);
buf[4] = 0xff;
buf[5] = 0x10 | scope; /* scope from broadcast address */
buf[6] = 0x40; /* IPv4 signature */
buf[7] = 0x1b;
buf[8] = broadcast[8]; /* P_Key */
buf[9] = broadcast[9];
buf[10] = 0;
buf[11] = 0;
buf[12] = 0;
buf[13] = 0;
buf[14] = 0;
buf[15] = 0;
buf[19] = addr & 0xff;
addr >>= 8;
buf[18] = addr & 0xff;
addr >>= 8;
buf[17] = addr & 0xff;
addr >>= 8;
buf[16] = addr & 0x0f;
}
static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
memcpy(buf, broadcast, 4);
else
memcpy(buf, &naddr, sizeof(naddr));
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
static __inline__ void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
}
#endif
}
#endif
static inline int sk_mc_loop(struct sock *sk)
{
if (!sk)
return 1;
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
}
WARN_ON(1);
return 1;
}
extern int ip_call_ra_chain(struct sk_buff *skb);
/*
* Functions provided by ip_fragment.c
*/
enum ip_defrag_users {
IP_DEFRAG_LOCAL_DELIVER,
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
IP_DEFRAG_CONNTRACK_OUT,
__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD
};
int ip_defrag(struct sk_buff *skb, u32 user);
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
/*
* Functions provided by ip_forward.c
*/
extern int ip_forward(struct sk_buff *skb);
/*
* Functions provided by ip_options.c
*/
extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag);
extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
extern void ip_options_fragment(struct sk_buff *skb);
extern int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb);
extern int ip_options_get(struct net *net, struct ip_options_rcu **optp,
unsigned char *data, int optlen);
extern int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen);
extern void ip_options_undo(struct ip_options * opt);
extern void ip_forward_options(struct sk_buff *skb);
extern int ip_options_rcv_srr(struct sk_buff *skb);
/*
* Functions provided by ip_sockglue.c
*/
extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int ip_cmsg_send(struct net *net,
struct msghdr *msg, struct ipcm_cookie *ipc);
extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
extern int compat_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen);
extern int compat_ip_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen);
extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload);
extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
#ifdef CONFIG_PROC_FS
extern int ip_misc_proc_init(void);
#endif
#endif /* _IP_H */
|
60,835,035,360,579 | /*
* net/dccp/ipv4.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
/*
* The per-net dccp.v4_ctl_sk socket is used for responding to
* Out-of-the-blue (OOTB) packets. A control socket is created
* for each network namespace at initialization time.
*/
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 fl4;
struct rtable *rt;
int err;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
if (inet->opt != NULL && inet->opt->srr) {
if (daddr == 0)
return -EINVAL;
nexthop = inet->opt->faddr;
}
orig_sport = inet->inet_sport;
orig_dport = usin->sin_port;
rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
orig_sport, orig_dport, sk, true);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (inet->opt == NULL || !inet->opt->srr)
daddr = rt->rt_dst;
if (inet->inet_saddr == 0)
inet->inet_saddr = rt->rt_src;
inet->inet_rcv_saddr = inet->inet_saddr;
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet->opt != NULL)
inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
/*
* Socket identity is still unknown (sport may be zero).
* However we set state to DCCP_REQUESTING and, without releasing the
* socket lock, select a source port, enter ourselves into the hash
* tables and complete initialization afterwards.
*/
dccp_set_state(sk, DCCP_REQUESTING);
err = inet_hash_connect(&dccp_death_row, sk);
if (err != 0)
goto failure;
rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
rt = NULL;
goto failure;
}
/* OK, now commit destination to socket. */
sk_setup_caps(sk, &rt->dst);
dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
inet->inet_id = dp->dccps_iss ^ jiffies;
err = dccp_connect(sk);
rt = NULL;
if (err != 0)
goto failure;
out:
return err;
failure:
/*
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
goto out;
}
EXPORT_SYMBOL_GPL(dccp_v4_connect);
/*
* This routine does path mtu discovery as defined in RFC1191.
*/
static inline void dccp_do_pmtu_discovery(struct sock *sk,
const struct iphdr *iph,
u32 mtu)
{
struct dst_entry *dst;
const struct inet_sock *inet = inet_sk(sk);
const struct dccp_sock *dp = dccp_sk(sk);
/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
* sent out by Linux are always < 576 bytes, so they should go through
* unfragmented).
*/
if (sk->sk_state == DCCP_LISTEN)
return;
/* We don't check in the dst entry if PMTU discovery is forbidden
* on this route. We just assume that no packet-too-big packets
* are sent back when PMTU discovery is not active.
* There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
if ((dst = __sk_dst_check(sk, 0)) == NULL)
return;
dst->ops->update_pmtu(dst, mtu);
/* Something is about to go wrong... Remember the soft error
* in case this connection is not able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
dccp_sync_mss(sk, mtu);
/*
* From RFC 4340, sec. 14.1:
*
* DCCP-Sync packets are the best choice for upward
* probing, since DCCP-Sync probes do not risk application
* data loss.
*/
dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
} /* else let the usual retransmit timer handle it */
}
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
* returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
* After adjustment, header points to the first 8 bytes of the DCCP header. We
* need to find the appropriate port.
*
* The locking strategy used here is very "optimistic". When someone else
* accesses the socket the ICMP is just dropped and for some paths there is no
* check at all. A more general error queue to queue errors for later handling
* is probably better.
*/
static void dccp_v4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
struct dccp_sock *dp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
__u64 seq;
int err;
struct net *net = dev_net(skb->dev);
if (skb->len < offset + sizeof(*dh) ||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
sk = inet_lookup(net, &dccp_hashinfo,
iph->daddr, dh->dccph_dport,
iph->saddr, dh->dccph_sport, inet_iif(skb));
if (sk == NULL) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == DCCP_CLOSED)
goto out;
dp = dccp_sk(sk);
seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (!sock_owned_by_user(sk))
dccp_do_pmtu_discovery(sk, iph, info);
goto out;
}
err = icmp_err_convert[code].errno;
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
struct request_sock *req , **prev;
case DCCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
iph->daddr, iph->saddr);
if (!req)
goto out;
/*
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
WARN_ON(req->sk);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
/*
* Still in RESPOND, just remove it silently.
* There is no good way to pass the error to the newly
* created socket, and POSIX does not want network
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case DCCP_REQUESTING:
case DCCP_RESPOND:
if (!sock_owned_by_user(sk)) {
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
/* If we've already connected we will keep trying
* until we time out, or the user gives up.
*
* RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
* considered hard errors (well, FRAG_FAILED too, but it is obsoleted
* by PMTU discovery).
*
* Note that in the modern internet, where routing is unreliable and
* broken firewalls sit in every dark corner sending random errors at
* their masters' bidding, even these two messages have finally lost
* their original sense (even Linux sends invalid PORT_UNREACHs).
*
* Now we are in compliance with RFCs.
* --ANK (980905)
*/
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else /* Only an error on timeout */
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
__be32 src, __be32 dst)
{
return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}
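/*
 * Editor's note: dccp_v4_csum_finish() above folds the IPv4 pseudo-header
 * (source, destination, protocol, length) into the running one's-complement
 * sum, the same scheme TCP and UDP use. Below is a hedged from-scratch
 * userspace illustration; csum_with_pseudo_hdr and the sample addresses are
 * invented for the demo and are not kernel interfaces.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
static uint16_t csum_with_pseudo_hdr(uint32_t saddr, uint32_t daddr,
				     uint8_t proto, const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;
	/* Pseudo-header: source, destination, zero+protocol, segment length. */
	sum += (saddr >> 16) + (saddr & 0xFFFF);
	sum += (daddr >> 16) + (daddr & 0xFFFF);
	sum += proto;
	sum += (uint32_t)len;
	/* Segment itself, 16 bits at a time; a trailing odd byte is zero-padded. */
	for (i = 0; i < len; i += 2) {
		uint16_t word = (uint16_t)(seg[i] << 8);
		if (i + 1 < len)
			word |= seg[i + 1];
		sum += word;
	}
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}
int main(void)
{
	const uint8_t payload[] = "hello";
	uint16_t c = csum_with_pseudo_hdr(ntohl(inet_addr("192.168.0.1")),
					  ntohl(inet_addr("192.168.0.2")),
					  33 /* IPPROTO_DCCP */, payload, 5);
	printf("checksum=0x%04x\n", c);
	return 0;
}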
void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb,
inet->inet_saddr,
inet->inet_daddr);
}
EXPORT_SYMBOL_GPL(dccp_v4_send_check);
static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
dccp_hdr(skb)->dccph_dport,
dccp_hdr(skb)->dccph_sport);
}
/*
* The three way handshake has completed - we got a valid ACK or DATAACK -
* now create the new socket.
*
* This is the equivalent of TCP's tcp_v4_syn_recv_sock
*/
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct sock *newsk;
if (sk_acceptq_is_full(sk))
goto exit_overflow;
if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
goto exit;
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto exit_nonewsk;
sk_setup_caps(newsk, dst);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
newinet->inet_daddr = ireq->rmt_addr;
newinet->inet_rcv_saddr = ireq->loc_addr;
newinet->inet_saddr = ireq->loc_addr;
newinet->opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->inet_id = jiffies;
dccp_sync_mss(newsk, dst_mtu(dst));
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto exit;
}
__inet_hash_nolisten(newsk, NULL);
return newsk;
exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
}
EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet_csk_search_req(sk, &prev,
dh->dccph_sport,
iph->saddr, iph->daddr);
if (req != NULL)
return dccp_check_req(sk, skb, req, prev);
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
iph->saddr, dh->dccph_sport,
iph->daddr, dh->dccph_dport,
inet_iif(skb));
if (nsk != NULL) {
if (nsk->sk_state != DCCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
return sk;
}
static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = skb_rtable(skb)->rt_iif,
.daddr = ip_hdr(skb)->saddr,
.saddr = ip_hdr(skb)->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
.flowi4_proto = sk->sk_protocol,
.fl4_sport = dccp_hdr(skb)->dccph_dport,
.fl4_dport = dccp_hdr(skb)->dccph_sport,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
return &rt->dst;
}
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
struct request_values *rv_unused)
{
int err = -1;
struct sk_buff *skb;
struct dst_entry *dst;
dst = inet_csk_route_req(sk, req);
if (dst == NULL)
goto out;
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
ireq->rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
out:
dst_release(dst);
return err;
}
static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
int err;
const struct iphdr *rxiph;
struct sk_buff *skb;
struct dst_entry *dst;
struct net *net = dev_net(skb_dst(rxskb)->dev);
struct sock *ctl_sk = net->dccp.v4_ctl_sk;
/* Never send a reset in response to a reset. */
if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
return;
if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
return;
dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
if (dst == NULL)
return;
skb = dccp_ctl_make_reset(ctl_sk, rxskb);
if (skb == NULL)
goto out;
rxiph = ip_hdr(rxskb);
dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
}
out:
dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
kfree(inet_rsk(req)->opt);
}
static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct dccp_request_sock),
.rtx_syn_ack = dccp_v4_send_response,
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v4_reqsk_destructor,
.send_reset = dccp_v4_ctl_send_reset,
};
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
struct request_sock *req;
struct dccp_request_sock *dreq;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
/* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return 0; /* discard, don't send a reset here */
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* TW buckets are converted to open requests without
* limitation; they conserve resources and the peer is
* evidently a real one.
*/
dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
if (inet_csk_reqsk_queue_is_full(sk))
goto drop;
/*
* The accept backlog is full. If we have already queued enough
* warm entries in the syn queue, drop this request. That is better
* than clogging the syn queue with openreqs whose timeout increases
* exponentially.
*/
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet_reqsk_alloc(&dccp_request_sock_ops);
if (req == NULL)
goto drop;
if (dccp_reqsk_init(req, dccp_sk(sk), skb))
goto drop_and_free;
dreq = dccp_rsk(req);
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
ireq = inet_rsk(req);
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
/*
* Step 3: Process LISTEN state
*
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
*
* In fact we defer setting S.GSR, S.SWL, S.SWH to
* dccp_create_openreq_child.
*/
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_iss = dccp_v4_init_sequence(skb);
dreq->dreq_service = service;
if (dccp_v4_send_response(sk, req, NULL))
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
return 0;
drop_and_free:
reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct dccp_hdr *dh = dccp_hdr(skb);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dh, skb->len))
goto reset;
return 0;
}
/*
* Step 3: Process LISTEN state
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v4_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dh, skb->len))
goto reset;
return 0;
reset:
dccp_v4_ctl_send_reset(sk, skb);
discard:
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
/**
* dccp_invalid_packet - check for malformed packets
* Implements RFC 4340, 8.5: Step 1: Check header basics
* Packets that fail these checks are ignored and do not receive Resets.
*/
int dccp_invalid_packet(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
unsigned int cscov;
if (skb->pkt_type != PACKET_HOST)
return 1;
/* If the packet is shorter than 12 bytes, drop packet and return */
if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
DCCP_WARN("pskb_may_pull failed\n");
return 1;
}
dh = dccp_hdr(skb);
/* If P.type is not understood, drop packet and return */
if (dh->dccph_type >= DCCP_PKT_INVALID) {
DCCP_WARN("invalid packet type\n");
return 1;
}
/*
* If P.Data Offset is too small for packet type, drop packet and return
*/
if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
return 1;
}
/*
* If P.Data Offset is too large for the packet, drop packet and return
*/
if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
return 1;
}
/*
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
*/
if ((dh->dccph_type < DCCP_PKT_DATA ||
dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
dccp_packet_name(dh->dccph_type));
return 1;
}
/*
* If P.CsCov is too large for the packet size, drop packet and return.
* This must come _before_ checksumming (not as RFC 4340 suggests).
*/
cscov = dccp_csum_coverage(skb);
if (cscov > skb->len) {
DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
dh->dccph_cscov, skb->len);
return 1;
}
/* If header checksum is incorrect, drop packet and return.
* (This step is completed in the AF-dependent functions.) */
skb->csum = skb_checksum(skb, 0, cscov, 0);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_invalid_packet);
/* this is called when real data arrives */
static int dccp_v4_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
const struct iphdr *iph;
struct sock *sk;
int min_cov;
/* Step 1: Check header basics */
if (dccp_invalid_packet(skb))
goto discard_it;
iph = ip_hdr(skb);
/* Step 1: If header checksum is incorrect, drop packet and return */
if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
DCCP_WARN("dropped packet with invalid checksum\n");
goto discard_it;
}
dh = dccp_hdr(skb);
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
dccp_pr_debug("%8.8s src=%pI4@%-5d dst=%pI4@%-5d seq=%llu",
dccp_packet_name(dh->dccph_type),
&iph->saddr, ntohs(dh->dccph_sport),
&iph->daddr, ntohs(dh->dccph_dport),
(unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
if (dccp_packet_without_ack(skb)) {
DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
dccp_pr_debug_cat("\n");
} else {
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
/* Step 2:
* Look up flow ID in table and get corresponding socket */
sk = __inet_lookup_skb(&dccp_hashinfo, skb,
dh->dccph_sport, dh->dccph_dport);
/*
* Step 2:
* If no socket ...
*/
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
goto no_dccp_socket;
}
/*
* Step 2:
* ... or S.state == TIMEWAIT,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_TIME_WAIT) {
dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
/*
* RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
* o if MinCsCov = 0, only packets with CsCov = 0 are accepted
* o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
*/
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
dh->dccph_cscov, min_cov);
/* FIXME: "Such packets SHOULD be reported using Data Dropped
* options (Section 11.7) with Drop Code 0, Protocol
* Constraints." */
goto discard_and_relse;
}
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
/*
* Step 2:
* If no socket ...
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
dccp_v4_ctl_send_reset(sk, skb);
}
discard_it:
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
}
static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = dccp_v4_conn_request,
.syn_recv_sock = dccp_v4_request_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
.bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static int dccp_v4_init_sock(struct sock *sk)
{
static __u8 dccp_v4_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v4_ctl_sock_initialized))
dccp_v4_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
}
return err;
}
static struct timewait_sock_ops dccp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct inet_timewait_sock),
};
static struct proto dccp_v4_prot = {
.name = "DCCP",
.owner = THIS_MODULE,
.close = dccp_close,
.connect = dccp_v4_connect,
.disconnect = dccp_disconnect,
.ioctl = dccp_ioctl,
.init = dccp_v4_init_sock,
.setsockopt = dccp_setsockopt,
.getsockopt = dccp_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v4_do_rcv,
.hash = inet_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.rsk_prot = &dccp_request_sock_ops,
.twsk_prot = &dccp_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
.netns_ok = 1,
};
static const struct proto_ops inet_dccp_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
.poll = dccp_poll,
.ioctl = inet_ioctl,
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v4_protosw = {
.type = SOCK_DCCP,
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
.no_check = 0,
.flags = INET_PROTOSW_ICSK,
};
static int __net_init dccp_v4_init_net(struct net *net)
{
if (dccp_hashinfo.bhash == NULL)
return -ESOCKTNOSUPPORT;
return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
SOCK_DCCP, IPPROTO_DCCP, net);
}
static void __net_exit dccp_v4_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
}
static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
};
static int __init dccp_v4_init(void)
{
int err = proto_register(&dccp_v4_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
if (err != 0)
goto out_proto_unregister;
inet_register_protosw(&dccp_v4_protosw);
err = register_pernet_subsys(&dccp_v4_ops);
if (err)
goto out_destroy_ctl_sock;
out:
return err;
out_destroy_ctl_sock:
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
out_proto_unregister:
proto_unregister(&dccp_v4_prot);
goto out;
}
static void __exit dccp_v4_exit(void)
{
unregister_pernet_subsys(&dccp_v4_ops);
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
proto_unregister(&dccp_v4_prot);
}
module_init(dccp_v4_init);
module_exit(dccp_v4_exit);
/*
* __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
* values directly. Also cover the case where the protocol is not specified,
* i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
| /*
* net/dccp/ipv4.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
/*
* The per-net dccp.v4_ctl_sk socket is used for responding to
* the Out-of-the-blue (OOTB) packets. A control sock is created
* for this purpose at initialization time.
*/
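/*
 * Editor's note (not part of the original source): the per-net control
 * socket referred to above is set up later in this file by
 * dccp_v4_init_net(), which calls inet_ctl_sock_create() to populate
 * net->dccp.v4_ctl_sk, and is torn down again by dccp_v4_exit_net().
 */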
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 fl4;
struct rtable *rt;
int err;
struct ip_options_rcu *inet_opt;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (inet_opt != NULL && inet_opt->opt.srr) {
if (daddr == 0)
return -EINVAL;
nexthop = inet_opt->opt.faddr;
}
orig_sport = inet->inet_sport;
orig_dport = usin->sin_port;
rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
orig_sport, orig_dport, sk, true);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (inet_opt == NULL || !inet_opt->opt.srr)
daddr = rt->rt_dst;
if (inet->inet_saddr == 0)
inet->inet_saddr = rt->rt_src;
inet->inet_rcv_saddr = inet->inet_saddr;
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
/*
* Socket identity is still unknown (sport may be zero).
* However we set the state to DCCP_REQUESTING and, without releasing
* the socket lock, select a source port, enter ourselves into the hash
* tables and complete initialization after this.
*/
dccp_set_state(sk, DCCP_REQUESTING);
err = inet_hash_connect(&dccp_death_row, sk);
if (err != 0)
goto failure;
rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
rt = NULL;
goto failure;
}
/* OK, now commit destination to socket. */
sk_setup_caps(sk, &rt->dst);
dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
inet->inet_id = dp->dccps_iss ^ jiffies;
err = dccp_connect(sk);
rt = NULL;
if (err != 0)
goto failure;
out:
return err;
failure:
/*
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
goto out;
}
EXPORT_SYMBOL_GPL(dccp_v4_connect);
/*
* This routine does path mtu discovery as defined in RFC1191.
*/
static inline void dccp_do_pmtu_discovery(struct sock *sk,
const struct iphdr *iph,
u32 mtu)
{
struct dst_entry *dst;
const struct inet_sock *inet = inet_sk(sk);
const struct dccp_sock *dp = dccp_sk(sk);
/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
* sent out by Linux are always < 576 bytes, so they should go through
* unfragmented).
*/
if (sk->sk_state == DCCP_LISTEN)
return;
/* We don't check in the dst entry whether PMTU discovery is forbidden
* on this route. We just assume that no packet-too-big packets
* are sent back when PMTU discovery is not active.
* There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
if ((dst = __sk_dst_check(sk, 0)) == NULL)
return;
dst->ops->update_pmtu(dst, mtu);
/* Something is about to go wrong... Remember the soft error
* for the case that this connection is not able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
dccp_sync_mss(sk, mtu);
/*
* From RFC 4340, sec. 14.1:
*
* DCCP-Sync packets are the best choice for upward
* probing, since DCCP-Sync probes do not risk application
* data loss.
*/
dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
} /* else let the usual retransmit timer handle it */
}
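/*
 * Worked example (editorial sketch, not part of the original source):
 * assume the cached path MTU in icsk_pmtu_cookie is 1500 and an
 * ICMP_FRAG_NEEDED message reports mtu = 1400.  update_pmtu() lowers the
 * route's MTU, so dst_mtu(dst) becomes 1400; since 1500 > 1400 and
 * pmtudisc != IP_PMTUDISC_DONT, dccp_sync_mss() shrinks the maximum
 * packet size and a DCCP-Sync is emitted, following RFC 4340, sec. 14.1,
 * because Sync packets carry no application data that could be lost.
 */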
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
* returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
* After adjustment header points to the first 8 bytes of the dccp header. We
* need to find the appropriate port.
*
* The locking strategy used here is very "optimistic". When someone else
* accesses the socket the ICMP is just dropped and for some paths there is no
* check at all. A more general error queue to queue errors for later handling
* is probably better.
*/
static void dccp_v4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
struct dccp_sock *dp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
__u64 seq;
int err;
struct net *net = dev_net(skb->dev);
if (skb->len < offset + sizeof(*dh) ||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
sk = inet_lookup(net, &dccp_hashinfo,
iph->daddr, dh->dccph_dport,
iph->saddr, dh->dccph_sport, inet_iif(skb));
if (sk == NULL) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == DCCP_CLOSED)
goto out;
dp = dccp_sk(sk);
seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (!sock_owned_by_user(sk))
dccp_do_pmtu_discovery(sk, iph, info);
goto out;
}
err = icmp_err_convert[code].errno;
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
struct request_sock *req , **prev;
case DCCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
iph->daddr, iph->saddr);
if (!req)
goto out;
/*
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
WARN_ON(req->sk);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
/*
* Still in RESPOND, just remove it silently.
* There is no good way to pass the error to the newly
* created socket, and POSIX does not want network
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case DCCP_REQUESTING:
case DCCP_RESPOND:
if (!sock_owned_by_user(sk)) {
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
/* If we've already connected we will keep trying
* until we time out, or the user gives up.
*
* rfc1122 4.2.3.9 allows us to consider as hard errors
* only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
* but it is obsoleted by pmtu discovery).
*
* Note that in the modern internet, where routing is unreliable
* and broken firewalls sit in every dark corner sending random
* errors ordered by their masters, even these two messages have
* finally lost their original sense (even Linux sends invalid
* PORT_UNREACHs).
*
* Now we are in compliance with RFCs.
* --ANK (980905)
*/
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else /* Only an error on timeout */
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
__be32 src, __be32 dst)
{
return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}
void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb,
inet->inet_saddr,
inet->inet_daddr);
}
EXPORT_SYMBOL_GPL(dccp_v4_send_check);
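/*
 * Editorial sketch (assumption-based, not part of the original source):
 * the checksum is computed in two steps.  dccp_csum_outgoing() is
 * expected to leave a partial one's-complement sum over the bytes
 * selected by CsCov in skb->csum; dccp_v4_csum_finish() then folds in
 * the IPv4 pseudo-header (source address, destination address, protocol
 * 33 and skb->len) via csum_tcpudp_magic() to produce the final 16-bit
 * value stored in dccph_checksum.
 */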
static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
dccp_hdr(skb)->dccph_dport,
dccp_hdr(skb)->dccph_sport);
}
/*
* The three way handshake has completed - we got a valid ACK or DATAACK -
* now create the new socket.
*
* This is the equivalent of TCP's tcp_v4_syn_recv_sock
*/
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct sock *newsk;
if (sk_acceptq_is_full(sk))
goto exit_overflow;
if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
goto exit;
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto exit_nonewsk;
sk_setup_caps(newsk, dst);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
newinet->inet_daddr = ireq->rmt_addr;
newinet->inet_rcv_saddr = ireq->loc_addr;
newinet->inet_saddr = ireq->loc_addr;
newinet->inet_opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->inet_id = jiffies;
dccp_sync_mss(newsk, dst_mtu(dst));
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto exit;
}
__inet_hash_nolisten(newsk, NULL);
return newsk;
exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
}
EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet_csk_search_req(sk, &prev,
dh->dccph_sport,
iph->saddr, iph->daddr);
if (req != NULL)
return dccp_check_req(sk, skb, req, prev);
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
iph->saddr, dh->dccph_sport,
iph->daddr, dh->dccph_dport,
inet_iif(skb));
if (nsk != NULL) {
if (nsk->sk_state != DCCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
return sk;
}
static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = skb_rtable(skb)->rt_iif,
.daddr = ip_hdr(skb)->saddr,
.saddr = ip_hdr(skb)->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
.flowi4_proto = sk->sk_protocol,
.fl4_sport = dccp_hdr(skb)->dccph_dport,
.fl4_dport = dccp_hdr(skb)->dccph_sport,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
return &rt->dst;
}
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
struct request_values *rv_unused)
{
int err = -1;
struct sk_buff *skb;
struct dst_entry *dst;
dst = inet_csk_route_req(sk, req);
if (dst == NULL)
goto out;
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
ireq->rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
out:
dst_release(dst);
return err;
}
static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
int err;
const struct iphdr *rxiph;
struct sk_buff *skb;
struct dst_entry *dst;
struct net *net = dev_net(skb_dst(rxskb)->dev);
struct sock *ctl_sk = net->dccp.v4_ctl_sk;
/* Never send a reset in response to a reset. */
if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
return;
if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
return;
dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
if (dst == NULL)
return;
skb = dccp_ctl_make_reset(ctl_sk, rxskb);
if (skb == NULL)
goto out;
rxiph = ip_hdr(rxskb);
dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
}
out:
dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
kfree(inet_rsk(req)->opt);
}
static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct dccp_request_sock),
.rtx_syn_ack = dccp_v4_send_response,
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v4_reqsk_destructor,
.send_reset = dccp_v4_ctl_send_reset,
};
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
struct request_sock *req;
struct dccp_request_sock *dreq;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
/* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return 0; /* discard, don't send a reset here */
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* TW buckets are converted to open requests without
* limitation, since they conserve resources and the peer is
* evidently a real one.
*/
dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
if (inet_csk_reqsk_queue_is_full(sk))
goto drop;
/*
* Accept backlog is full. If we have already queued enough
* warm entries in the syn queue, drop the request. This is better than
* clogging the syn queue with openreqs that have exponentially
* increasing timeouts.
*/
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet_reqsk_alloc(&dccp_request_sock_ops);
if (req == NULL)
goto drop;
if (dccp_reqsk_init(req, dccp_sk(sk), skb))
goto drop_and_free;
dreq = dccp_rsk(req);
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
ireq = inet_rsk(req);
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
/*
* Step 3: Process LISTEN state
*
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
*
* In fact we defer setting S.GSR, S.SWL, S.SWH to
* dccp_create_openreq_child.
*/
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_iss = dccp_v4_init_sequence(skb);
dreq->dreq_service = service;
if (dccp_v4_send_response(sk, req, NULL))
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
return 0;
drop_and_free:
reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct dccp_hdr *dh = dccp_hdr(skb);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dh, skb->len))
goto reset;
return 0;
}
/*
* Step 3: Process LISTEN state
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v4_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dh, skb->len))
goto reset;
return 0;
reset:
dccp_v4_ctl_send_reset(sk, skb);
discard:
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
/**
* dccp_invalid_packet - check for malformed packets
* Implements RFC 4340, 8.5: Step 1: Check header basics
* Packets that fail these checks are ignored and do not receive Resets.
*/
int dccp_invalid_packet(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
unsigned int cscov;
if (skb->pkt_type != PACKET_HOST)
return 1;
/* If the packet is shorter than 12 bytes, drop packet and return */
if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
DCCP_WARN("pskb_may_pull failed\n");
return 1;
}
dh = dccp_hdr(skb);
/* If P.type is not understood, drop packet and return */
if (dh->dccph_type >= DCCP_PKT_INVALID) {
DCCP_WARN("invalid packet type\n");
return 1;
}
/*
* If P.Data Offset is too small for packet type, drop packet and return
*/
if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
return 1;
}
/*
* If P.Data Offset is too large for the packet, drop packet and return
*/
if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
return 1;
}
/*
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
*/
if ((dh->dccph_type < DCCP_PKT_DATA ||
dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
dccp_packet_name(dh->dccph_type));
return 1;
}
/*
* If P.CsCov is too large for the packet size, drop packet and return.
* This must come _before_ checksumming (not as RFC 4340 suggests).
*/
cscov = dccp_csum_coverage(skb);
if (cscov > skb->len) {
DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
dh->dccph_cscov, skb->len);
return 1;
}
/* If header checksum is incorrect, drop packet and return.
* (This step is completed in the AF-dependent functions.) */
skb->csum = skb_checksum(skb, 0, cscov, 0);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_invalid_packet);
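/*
 * Worked example for the Data Offset check above (editorial, not part of
 * the original source): dccph_doff counts 32-bit words of the whole DCCP
 * header including options.  A DCCP-Request always uses extended (48-bit)
 * sequence numbers, so its fixed header is 16 bytes of generic header
 * plus a 4-byte Service Code, i.e. dccp_hdr_len() == 20; an option-free
 * Request must therefore carry dccph_doff >= 5, and a Request claiming
 * dccph_doff == 3 (12 bytes) is dropped by the "too small" test.
 */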
/* this is called when real data arrives */
static int dccp_v4_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
const struct iphdr *iph;
struct sock *sk;
int min_cov;
/* Step 1: Check header basics */
if (dccp_invalid_packet(skb))
goto discard_it;
iph = ip_hdr(skb);
/* Step 1: If header checksum is incorrect, drop packet and return */
if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
DCCP_WARN("dropped packet with invalid checksum\n");
goto discard_it;
}
dh = dccp_hdr(skb);
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
dccp_pr_debug("%8.8s src=%pI4@%-5d dst=%pI4@%-5d seq=%llu",
dccp_packet_name(dh->dccph_type),
&iph->saddr, ntohs(dh->dccph_sport),
&iph->daddr, ntohs(dh->dccph_dport),
(unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
if (dccp_packet_without_ack(skb)) {
DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
dccp_pr_debug_cat("\n");
} else {
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
/* Step 2:
* Look up flow ID in table and get corresponding socket */
sk = __inet_lookup_skb(&dccp_hashinfo, skb,
dh->dccph_sport, dh->dccph_dport);
/*
* Step 2:
* If no socket ...
*/
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
goto no_dccp_socket;
}
/*
* Step 2:
* ... or S.state == TIMEWAIT,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_TIME_WAIT) {
dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
/*
* RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
* o if MinCsCov = 0, only packets with CsCov = 0 are accepted
* o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
*/
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
dh->dccph_cscov, min_cov);
/* FIXME: "Such packets SHOULD be reported using Data Dropped
* options (Section 11.7) with Drop Code 0, Protocol
* Constraints." */
goto discard_and_relse;
}
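/*
 * Worked example for the MinCsCov test above (editorial, not part of the
 * original source): CsCov == 0 means the checksum covers the whole packet
 * and is always accepted, which is why the test is gated on
 * dh->dccph_cscov being non-zero.  With dccps_pcrlen (MinCsCov) == 0, a
 * packet advertising CsCov == 3 is discarded; with MinCsCov == 4,
 * CsCov == 2 is discarded while CsCov == 6 passes, matching the two
 * bullet points quoted from RFC 4340, sec. 9.2.1.
 */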
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
/*
* Step 2:
* If no socket ...
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
dccp_v4_ctl_send_reset(sk, skb);
}
discard_it:
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
}
static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = dccp_v4_conn_request,
.syn_recv_sock = dccp_v4_request_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
.bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static int dccp_v4_init_sock(struct sock *sk)
{
static __u8 dccp_v4_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v4_ctl_sock_initialized))
dccp_v4_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
}
return err;
}
static struct timewait_sock_ops dccp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct inet_timewait_sock),
};
static struct proto dccp_v4_prot = {
.name = "DCCP",
.owner = THIS_MODULE,
.close = dccp_close,
.connect = dccp_v4_connect,
.disconnect = dccp_disconnect,
.ioctl = dccp_ioctl,
.init = dccp_v4_init_sock,
.setsockopt = dccp_setsockopt,
.getsockopt = dccp_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v4_do_rcv,
.hash = inet_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.rsk_prot = &dccp_request_sock_ops,
.twsk_prot = &dccp_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
.netns_ok = 1,
};
static const struct proto_ops inet_dccp_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
.poll = dccp_poll,
.ioctl = inet_ioctl,
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v4_protosw = {
.type = SOCK_DCCP,
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
.no_check = 0,
.flags = INET_PROTOSW_ICSK,
};
static int __net_init dccp_v4_init_net(struct net *net)
{
if (dccp_hashinfo.bhash == NULL)
return -ESOCKTNOSUPPORT;
return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
SOCK_DCCP, IPPROTO_DCCP, net);
}
static void __net_exit dccp_v4_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
}
static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
};
static int __init dccp_v4_init(void)
{
int err = proto_register(&dccp_v4_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
if (err != 0)
goto out_proto_unregister;
inet_register_protosw(&dccp_v4_protosw);
err = register_pernet_subsys(&dccp_v4_ops);
if (err)
goto out_destroy_ctl_sock;
out:
return err;
out_destroy_ctl_sock:
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
out_proto_unregister:
proto_unregister(&dccp_v4_prot);
goto out;
}
static void __exit dccp_v4_exit(void)
{
unregister_pernet_subsys(&dccp_v4_ops);
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
proto_unregister(&dccp_v4_prot);
}
module_init(dccp_v4_init);
module_exit(dccp_v4_exit);
/*
* __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
* values directly. Also cover the case where the protocol is not specified,
* i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
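/*
 * Editorial note (assumption, not part of the original source): with the
 * numeric values spelled out, the aliases should expand to strings along
 * the lines of "net-pf-2-proto-33-type-6" and "net-pf-2-proto-0-type-6",
 * which is what the kernel requests via request_module() when a
 * socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP) call finds no registered
 * protocol, allowing this module to be auto-loaded.
 */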
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
|
275,658,719,589,348 | /*
* DCCP over IPv6
* Linux INET6 implementation
*
* Based on net/dccp6/ipv6.c
*
* Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include "dccp.h"
#include "ipv6.h"
#include "feat.h"
/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
static void dccp_v6_hash(struct sock *sk)
{
if (sk->sk_state != DCCP_CLOSED) {
if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
inet_hash(sk);
return;
}
local_bh_disable();
__inet6_hash(sk, NULL);
local_bh_enable();
}
}
/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr)
{
return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}
static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport )
{
return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}
static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
dccp_hdr(skb)->dccph_dport,
dccp_hdr(skb)->dccph_sport );
}
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
struct dccp_sock *dp;
struct ipv6_pinfo *np;
struct sock *sk;
int err;
__u64 seq;
struct net *net = dev_net(skb->dev);
if (skb->len < offset + sizeof(*dh) ||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
sk = inet6_lookup(net, &dccp_hashinfo,
&hdr->daddr, dh->dccph_dport,
&hdr->saddr, dh->dccph_sport, inet6_iif(skb));
if (sk == NULL) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == DCCP_CLOSED)
goto out;
dp = dccp_sk(sk);
seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
np = inet6_sk(sk);
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
if (sock_owned_by_user(sk))
goto out;
if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
goto out;
/* icmp should have updated the destination cache entry */
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
struct inet_sock *inet = inet_sk(sk);
struct flowi6 fl6;
/* BUGGG_FUTURE: Again, it is not clear how
to handle rthdr case. Ignore this complexity
for now.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &np->daddr);
ipv6_addr_copy(&fl6.saddr, &np->saddr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
if (IS_ERR(dst)) {
sk->sk_err_soft = -PTR_ERR(dst);
goto out;
}
} else
dst_hold(dst);
if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
dccp_sync_mss(sk, dst_mtu(dst));
} /* else let the usual retransmit timer handle it */
dst_release(dst);
goto out;
}
icmpv6_err_convert(type, code, &err);
/* Might be for a request_sock */
switch (sk->sk_state) {
struct request_sock *req, **prev;
case DCCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
&hdr->daddr, &hdr->saddr,
inet6_iif(skb));
if (req == NULL)
goto out;
/*
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
WARN_ON(req->sk != NULL);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case DCCP_REQUESTING:
case DCCP_RESPOND: /* Cannot happen.
It can, if SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
/*
* Wake people up to see the error
* (see connect in sock.c)
*/
sk->sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
struct request_values *rv_unused)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ipv6_txoptions *opt = NULL;
struct in6_addr *final_p, final;
struct flowi6 fl6;
int err = -1;
struct dst_entry *dst;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowlabel = 0;
fl6.flowi6_oif = ireq6->iif;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
opt = np->opt;
final_p = fl6_update_dst(&fl6, opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto done;
}
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
err = ip6_xmit(sk, skb, &fl6, opt);
err = net_xmit_eval(err);
}
done:
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
return err;
}
static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
if (inet6_rsk(req)->pktopts != NULL)
kfree_skb(inet6_rsk(req)->pktopts);
}
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
const struct ipv6hdr *rxip6h;
struct sk_buff *skb;
struct flowi6 fl6;
struct net *net = dev_net(skb_dst(rxskb)->dev);
struct sock *ctl_sk = net->dccp.v6_ctl_sk;
struct dst_entry *dst;
if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
return;
if (!ipv6_unicast_destination(rxskb))
return;
skb = dccp_ctl_make_reset(ctl_sk, rxskb);
if (skb == NULL)
return;
rxip6h = ipv6_hdr(rxskb);
dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
&rxip6h->daddr);
memset(&fl6, 0, sizeof(fl6));
ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.flowi6_oif = inet6_iif(rxskb);
fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
/* sk = NULL, but it is safe for now. RST socket required. */
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
ip6_xmit(ctl_sk, skb, &fl6, NULL);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
}
kfree_skb(skb);
}
static struct request_sock_ops dccp6_request_sock_ops = {
.family = AF_INET6,
.obj_size = sizeof(struct dccp6_request_sock),
.rtx_syn_ack = dccp_v6_send_response,
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v6_reqsk_destructor,
.send_reset = dccp_v6_ctl_send_reset,
};
static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet6_csk_search_req(sk, &prev,
dh->dccph_sport,
&iph->saddr,
&iph->daddr,
inet6_iif(skb));
if (req != NULL)
return dccp_check_req(sk, skb, req, prev);
nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
&iph->saddr, dh->dccph_sport,
&iph->daddr, ntohs(dh->dccph_dport),
inet6_iif(skb));
if (nsk != NULL) {
if (nsk->sk_state != DCCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
return sk;
}
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct request_sock *req;
struct dccp_request_sock *dreq;
struct inet6_request_sock *ireq6;
struct ipv6_pinfo *np = inet6_sk(sk);
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
if (skb->protocol == htons(ETH_P_IP))
return dccp_v4_conn_request(sk, skb);
if (!ipv6_unicast_destination(skb))
return 0; /* discard, don't send a reset here */
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* There are no SYN attacks on IPv6, yet...
*/
dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
if (inet_csk_reqsk_queue_is_full(sk))
goto drop;
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
if (req == NULL)
goto drop;
if (dccp_reqsk_init(req, dccp_sk(sk), skb))
goto drop_and_free;
dreq = dccp_rsk(req);
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
ireq6 = inet6_rsk(req);
ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
ireq6->pktopts = skb;
}
ireq6->iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq6->iif = inet6_iif(skb);
/*
* Step 3: Process LISTEN state
*
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
*
* In fact we defer setting S.GSR, S.SWL, S.SWH to
* dccp_create_openreq_child.
*/
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_iss = dccp_v6_init_sequence(skb);
dreq->dreq_service = service;
if (dccp_v6_send_response(sk, req, NULL))
goto drop_and_free;
inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
return 0;
drop_and_free:
reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, dccp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* This is a tricky place. Until this moment the IPv4 code
worked with the IPv6 icsk.icsk_af_ops.
Sync it now.
*/
dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (dst == NULL) {
struct in6_addr *final_p, final;
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl6, opt, &final);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst))
goto out;
}
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, dccp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
__ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->opt = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
kfree_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* Clone native IPv6 options from listening socket (if any)
*
* Yes, keeping a reference count would be much more clever, but we do
* one more thing here: reattach optmem to newsk.
*/
if (opt != NULL) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
dccp_sync_mss(newsk, dst_mtu(dst));
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
/* The socket must have its spinlock held when we get
* here.
*
* We have a potential double-lock case here, so even when
* doing backlog processing we use the BH locking scheme.
* This is because we cannot sleep with the original spinlock
* held.
*/
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *opt_skb = NULL;
/* Imagine: the socket is IPv6. An IPv4 packet arrives,
goes to the IPv4 receive handler and is backlogged.
From the backlog it always goes here. Kerboom...
Fortunately, dccp_rcv_established and rcv_established
handle them correctly, but that is not the case with
dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
*/
if (skb->protocol == htons(ETH_P_IP))
return dccp_v4_do_rcv(sk, skb);
if (sk_filter(sk, skb))
goto discard;
/*
* socket locking is here for SMP purposes as backlog rcv is currently
* called with bh processing disabled.
*/
/* Do Stevens' IPV6_PKTOPTIONS.
Yes, guys, it is the only place in our code where we
may make it not affect IPv4.
The rest of the code is protocol independent,
and I do not like the idea of uglifying IPv4.
Actually, the whole idea behind IPV6_PKTOPTIONS
does not look very well thought out. For now we latch
the options received in the last packet, enqueued
by tcp. Feel free to propose a better solution.
--ANK (980728)
*/
if (np->rxopt.all)
/*
* FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
* (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
*/
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
}
/*
* Step 3: Process LISTEN state
* If S.state == LISTEN,
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v6_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
/*
* Queue it on the new socket if the new socket is active,
* otherwise we just short-circuit this and continue with
* the new socket.
*/
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb != NULL)
__kfree_skb(opt_skb);
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
reset:
dccp_v6_ctl_send_reset(sk, skb);
discard:
if (opt_skb != NULL)
__kfree_skb(opt_skb);
kfree_skb(skb);
return 0;
}
static int dccp_v6_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
struct sock *sk;
int min_cov;
/* Step 1: Check header basics */
if (dccp_invalid_packet(skb))
goto discard_it;
/* Step 1: If header checksum is incorrect, drop packet and return. */
if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr)) {
DCCP_WARN("dropped packet with invalid checksum\n");
goto discard_it;
}
dh = dccp_hdr(skb);
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
if (dccp_packet_without_ack(skb))
DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
else
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
/* Step 2:
* Look up flow ID in table and get corresponding socket */
sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
dh->dccph_sport, dh->dccph_dport);
/*
* Step 2:
* If no socket ...
*/
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
goto no_dccp_socket;
}
/*
* Step 2:
* ... or S.state == TIMEWAIT,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_TIME_WAIT) {
dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
/*
* RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
* o if MinCsCov = 0, only packets with CsCov = 0 are accepted
* o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
*/
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
dh->dccph_cscov, min_cov);
/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
goto discard_and_relse;
}
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
return sk_receive_skb(sk, skb, 1) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
/*
* Step 2:
* If no socket ...
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
dccp_v6_ctl_send_reset(sk, skb);
}
discard_it:
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
}
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
int err;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
fl6_sock_release(flowlabel);
}
}
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
/* If interface is set while binding, indices
* must coincide.
*/
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connect to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
return -EINVAL;
}
ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
np->flow_label = fl6.flowlabel;
/*
* DCCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
icsk->icsk_af_ops = &dccp_ipv6_mapped;
sk->sk_backlog_rcv = dccp_v4_do_rcv;
err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &dccp_ipv6_af_ops;
sk->sk_backlog_rcv = dccp_v6_do_rcv;
goto failure;
}
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
return err;
}
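/*
 * Editorial note (not part of the original source): an IPv4-mapped IPv6
 * address has the form ::ffff:a.b.c.d, so for ::ffff:192.0.2.1 the last
 * 32-bit word, usin->sin6_addr.s6_addr32[3], holds 192.0.2.1.  That is
 * why the branch above builds a sockaddr_in from s6_addr32[3], switches
 * icsk_af_ops to dccp_ipv6_mapped and hands the connect off to
 * dccp_v4_connect(), undoing the switch if the IPv4 connect fails.
 */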
if (!ipv6_addr_any(&np->rcv_saddr))
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &np->daddr);
ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
final_p = fl6_update_dst(&fl6, np->opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
}
if (saddr == NULL) {
saddr = &fl6.saddr;
ipv6_addr_copy(&np->rcv_saddr, saddr);
}
/* set the source address */
ipv6_addr_copy(&np->saddr, saddr);
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
icsk->icsk_ext_hdr_len = 0;
if (np->opt != NULL)
icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
np->opt->opt_nflen);
inet->inet_dport = usin->sin6_port;
dccp_set_state(sk, DCCP_REQUESTING);
err = inet6_hash_connect(&dccp_death_row, sk);
if (err)
goto late_failure;
dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
err = dccp_connect(sk);
if (err)
goto late_failure;
return 0;
late_failure:
dccp_set_state(sk, DCCP_CLOSED);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
return err;
}
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
.queue_xmit = inet6_csk_xmit,
.send_check = dccp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
.conn_request = dccp_v6_conn_request,
.syn_recv_sock = dccp_v6_request_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/*
* DCCP over IPv4 via INET6 API
*/
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = dccp_v6_conn_request,
.syn_recv_sock = dccp_v6_request_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
* sk_alloc(), so they need not be done here.
*/
static int dccp_v6_init_sock(struct sock *sk)
{
static __u8 dccp_v6_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v6_ctl_sock_initialized))
dccp_v6_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
}
return err;
}
static void dccp_v6_destroy_sock(struct sock *sk)
{
dccp_destroy_sock(sk);
inet6_destroy_sock(sk);
}
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
static struct proto dccp_v6_prot = {
.name = "DCCPv6",
.owner = THIS_MODULE,
.close = dccp_close,
.connect = dccp_v6_connect,
.disconnect = dccp_disconnect,
.ioctl = dccp_ioctl,
.init = dccp_v6_init_sock,
.setsockopt = dccp_setsockopt,
.getsockopt = dccp_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v6_do_rcv,
.hash = dccp_v6_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_v6_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.rsk_prot = &dccp6_request_sock_ops,
.twsk_prot = &dccp6_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static const struct inet6_protocol dccp_v6_protocol = {
.handler = dccp_v6_rcv,
.err_handler = dccp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
static const struct proto_ops inet6_dccp_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet6_getname,
.poll = dccp_poll,
.ioctl = inet6_ioctl,
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v6_protosw = {
.type = SOCK_DCCP,
.protocol = IPPROTO_DCCP,
.prot = &dccp_v6_prot,
.ops = &inet6_dccp_ops,
.flags = INET_PROTOSW_ICSK,
};
static int __net_init dccp_v6_init_net(struct net *net)
{
if (dccp_hashinfo.bhash == NULL)
return -ESOCKTNOSUPPORT;
return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
SOCK_DCCP, IPPROTO_DCCP, net);
}
static void __net_exit dccp_v6_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
};
static int __init dccp_v6_init(void)
{
int err = proto_register(&dccp_v6_prot, 1);
if (err != 0)
goto out;
err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
if (err != 0)
goto out_unregister_proto;
inet6_register_protosw(&dccp_v6_protosw);
err = register_pernet_subsys(&dccp_v6_ops);
if (err != 0)
goto out_destroy_ctl_sock;
out:
return err;
out_destroy_ctl_sock:
inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
proto_unregister(&dccp_v6_prot);
goto out;
}
static void __exit dccp_v6_exit(void)
{
unregister_pernet_subsys(&dccp_v6_ops);
inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
inet6_unregister_protosw(&dccp_v6_protosw);
proto_unregister(&dccp_v6_prot);
}
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);
/*
* __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
* values directly. Also cover the case where the protocol is not specified,
* i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
| /*
* DCCP over IPv6
* Linux INET6 implementation
*
* Based on net/dccp6/ipv6.c
*
* Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include "dccp.h"
#include "ipv6.h"
#include "feat.h"
/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
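/* Add the socket to the DCCP hash tables: v4-mapped sockets go through the
 * plain inet hash, native IPv6 sockets through __inet6_hash(). */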
static void dccp_v6_hash(struct sock *sk)
{
if (sk->sk_state != DCCP_CLOSED) {
if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
inet_hash(sk);
return;
}
local_bh_disable();
__inet6_hash(sk, NULL);
local_bh_enable();
}
}
/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr)
{
return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}
static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport)
{
return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}
static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
dccp_hdr(skb)->dccph_dport,
dccp_hdr(skb)->dccph_sport);
}
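/* ICMPv6 error handler: match the offending packet back to its socket and
 * propagate the error; PKT_TOOBIG is handled by re-syncing the path MTU. */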
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
struct dccp_sock *dp;
struct ipv6_pinfo *np;
struct sock *sk;
int err;
__u64 seq;
struct net *net = dev_net(skb->dev);
if (skb->len < offset + sizeof(*dh) ||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
sk = inet6_lookup(net, &dccp_hashinfo,
&hdr->daddr, dh->dccph_dport,
&hdr->saddr, dh->dccph_sport, inet6_iif(skb));
if (sk == NULL) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == DCCP_CLOSED)
goto out;
dp = dccp_sk(sk);
seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
np = inet6_sk(sk);
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
if (sock_owned_by_user(sk))
goto out;
if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
goto out;
/* icmp should have updated the destination cache entry */
dst = __sk_dst_check(sk, np->dst_cookie);
if (dst == NULL) {
struct inet_sock *inet = inet_sk(sk);
struct flowi6 fl6;
/* BUGGG_FUTURE: Again, it is not clear how
to handle rthdr case. Ignore this complexity
for now.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &np->daddr);
ipv6_addr_copy(&fl6.saddr, &np->saddr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
if (IS_ERR(dst)) {
sk->sk_err_soft = -PTR_ERR(dst);
goto out;
}
} else
dst_hold(dst);
if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
dccp_sync_mss(sk, dst_mtu(dst));
} /* else let the usual retransmit timer handle it */
dst_release(dst);
goto out;
}
icmpv6_err_convert(type, code, &err);
/* Might be for a request_sock */
switch (sk->sk_state) {
struct request_sock *req, **prev;
case DCCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
&hdr->daddr, &hdr->saddr,
inet6_iif(skb));
if (req == NULL)
goto out;
/*
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
WARN_ON(req->sk != NULL);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case DCCP_REQUESTING:
case DCCP_RESPOND: /* Cannot happen.
It can, if SYNs are crossed. --ANK */
if (!sock_owned_by_user(sk)) {
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
/*
* Wake people up to see the error
* (see connect in sock.c)
*/
sk->sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
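/* Build and transmit the Response to a connection Request, routed via the
 * flow described by the request socket. */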
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
struct request_values *rv_unused)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ipv6_txoptions *opt = NULL;
struct in6_addr *final_p, final;
struct flowi6 fl6;
int err = -1;
struct dst_entry *dst;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowlabel = 0;
fl6.flowi6_oif = ireq6->iif;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
opt = np->opt;
final_p = fl6_update_dst(&fl6, opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto done;
}
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
err = ip6_xmit(sk, skb, &fl6, opt);
err = net_xmit_eval(err);
}
done:
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
return err;
}
static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
if (inet6_rsk(req)->pktopts != NULL)
kfree_skb(inet6_rsk(req)->pktopts);
}
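/* Send a Reset for a packet that has no usable socket, transmitted through
 * the per-net v6 control socket. */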
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
const struct ipv6hdr *rxip6h;
struct sk_buff *skb;
struct flowi6 fl6;
struct net *net = dev_net(skb_dst(rxskb)->dev);
struct sock *ctl_sk = net->dccp.v6_ctl_sk;
struct dst_entry *dst;
if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
return;
if (!ipv6_unicast_destination(rxskb))
return;
skb = dccp_ctl_make_reset(ctl_sk, rxskb);
if (skb == NULL)
return;
rxip6h = ipv6_hdr(rxskb);
dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
&rxip6h->daddr);
memset(&fl6, 0, sizeof(fl6));
ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.flowi6_oif = inet6_iif(rxskb);
fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
/* sk = NULL, but it is safe for now. RST socket required. */
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
ip6_xmit(ctl_sk, skb, &fl6, NULL);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
}
kfree_skb(skb);
}
static struct request_sock_ops dccp6_request_sock_ops = {
.family = AF_INET6,
.obj_size = sizeof(struct dccp6_request_sock),
.rtx_syn_ack = dccp_v6_send_response,
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v6_reqsk_destructor,
.send_reset = dccp_v6_ctl_send_reset,
};
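/* Resolve the socket a LISTEN-state packet belongs to: a pending connection
 * request, an already established child, or the listening socket itself. */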
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet6_csk_search_req(sk, &prev,
dh->dccph_sport,
&iph->saddr,
&iph->daddr,
inet6_iif(skb));
if (req != NULL)
return dccp_check_req(sk, skb, req, prev);
nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
&iph->saddr, dh->dccph_sport,
&iph->daddr, ntohs(dh->dccph_dport),
inet6_iif(skb));
if (nsk != NULL) {
if (nsk->sk_state != DCCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
return sk;
}
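/* Process a Request arriving on a listening socket: allocate and initialise
 * a request socket, then answer with a Response. */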
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct request_sock *req;
struct dccp_request_sock *dreq;
struct inet6_request_sock *ireq6;
struct ipv6_pinfo *np = inet6_sk(sk);
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
if (skb->protocol == htons(ETH_P_IP))
return dccp_v4_conn_request(sk, skb);
if (!ipv6_unicast_destination(skb))
return 0; /* discard, don't send a reset here */
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* There are no SYN attacks on IPv6, yet...
*/
dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
if (inet_csk_reqsk_queue_is_full(sk))
goto drop;
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
if (req == NULL)
goto drop;
if (dccp_reqsk_init(req, dccp_sk(sk), skb))
goto drop_and_free;
dreq = dccp_rsk(req);
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
ireq6 = inet6_rsk(req);
ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
ireq6->pktopts = skb;
}
ireq6->iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq6->iif = inet6_iif(skb);
/*
* Step 3: Process LISTEN state
*
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
*
* In fact we defer setting S.GSR, S.SWL, S.SWH to
* dccp_create_openreq_child.
*/
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_iss = dccp_v6_init_sequence(skb);
dreq->dreq_service = service;
if (dccp_v6_send_response(sk, req, NULL))
goto drop_and_free;
inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
return 0;
drop_and_free:
reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
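/* Create the child socket for an accepted connection, handling both the
 * v4-mapped and the native IPv6 case. */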
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, dccp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* This is a tricky place. Until this moment the IPv4 code
worked with the IPv6 icsk.icsk_af_ops.
Sync it now.
*/
dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (dst == NULL) {
struct in6_addr *final_p, final;
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl6, opt, &final);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst))
goto out;
}
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, dccp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
__ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
kfree_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* Clone native IPv6 options from listening socket (if any)
*
* Yes, keeping a reference count would be much more clever, but we do
* one more thing here: reattach optmem to newsk.
*/
if (opt != NULL) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
dccp_sync_mss(newsk, dst_mtu(dst));
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
/* The socket must have its spinlock held when we get
* here.
*
* We have a potential double-lock case here, so even when
* doing backlog processing we use the BH locking scheme.
* This is because we cannot sleep with the original spinlock
* held.
*/
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *opt_skb = NULL;
/* Imagine: the socket is IPv6 but an IPv4 packet arrives,
goes to the IPv4 receive handler and is backlogged.
From the backlog it always ends up here. Kerboom...
Fortunately, dccp_rcv_established and rcv_established
handle them correctly, but that is not the case with
dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
*/
if (skb->protocol == htons(ETH_P_IP))
return dccp_v4_do_rcv(sk, skb);
if (sk_filter(sk, skb))
goto discard;
/*
* socket locking is here for SMP purposes as backlog rcv is currently
* called with bh processing disabled.
*/
/* Do Stevens' IPV6_PKTOPTIONS.
Yes, guys, it is the only place in our code where we
can do this without affecting IPv4.
The rest of the code is protocol independent,
and I do not like the idea of uglifying IPv4.
Actually, the whole idea behind IPV6_PKTOPTIONS
does not look very well thought out. For now we latch
the options received in the last packet, as enqueued
by tcp. Feel free to propose a better solution.
--ANK (980728)
*/
if (np->rxopt.all)
/*
* FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
* (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
*/
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
}
/*
* Step 3: Process LISTEN state
* If S.state == LISTEN,
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v6_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
/*
* Queue it on the new socket if the new socket is active,
* otherwise we just short-circuit this and continue with
* the new socket.
*/
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb != NULL)
__kfree_skb(opt_skb);
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
reset:
dccp_v6_ctl_send_reset(sk, skb);
discard:
if (opt_skb != NULL)
__kfree_skb(opt_skb);
kfree_skb(skb);
return 0;
}
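/* Main DCCPv6 receive routine: validate the packet, look up the owning
 * socket and hand the packet over, or answer with a Reset. */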
static int dccp_v6_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
struct sock *sk;
int min_cov;
/* Step 1: Check header basics */
if (dccp_invalid_packet(skb))
goto discard_it;
/* Step 1: If header checksum is incorrect, drop packet and return. */
if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr)) {
DCCP_WARN("dropped packet with invalid checksum\n");
goto discard_it;
}
dh = dccp_hdr(skb);
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
if (dccp_packet_without_ack(skb))
DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
else
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
/* Step 2:
* Look up flow ID in table and get corresponding socket */
sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
dh->dccph_sport, dh->dccph_dport);
/*
* Step 2:
* If no socket ...
*/
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
goto no_dccp_socket;
}
/*
* Step 2:
* ... or S.state == TIMEWAIT,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_TIME_WAIT) {
dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
/*
* RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
* o if MinCsCov = 0, only packets with CsCov = 0 are accepted
* o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
*/
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
dh->dccph_cscov, min_cov);
/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
goto discard_and_relse;
}
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
return sk_receive_skb(sk, skb, 1) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
/*
* Step 2:
* If no socket ...
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
dccp_v6_ctl_send_reset(sk, skb);
}
discard_it:
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
}
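/* Active open: route the flow, pick addresses and ports, and kick off the
 * client-side handshake; v4-mapped destinations are handed to dccp_v4_connect. */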
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
int err;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
fl6_sock_release(flowlabel);
}
}
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
/* If interface is set while binding, indices
* must coincide.
*/
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connecting to a link-local address requires an interface */
if (!sk->sk_bound_dev_if)
return -EINVAL;
}
ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
np->flow_label = fl6.flowlabel;
/*
* DCCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
icsk->icsk_af_ops = &dccp_ipv6_mapped;
sk->sk_backlog_rcv = dccp_v4_do_rcv;
err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &dccp_ipv6_af_ops;
sk->sk_backlog_rcv = dccp_v6_do_rcv;
goto failure;
}
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
return err;
}
if (!ipv6_addr_any(&np->rcv_saddr))
saddr = &np->rcv_saddr;
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &np->daddr);
ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
final_p = fl6_update_dst(&fl6, np->opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
}
if (saddr == NULL) {
saddr = &fl6.saddr;
ipv6_addr_copy(&np->rcv_saddr, saddr);
}
/* set the source address */
ipv6_addr_copy(&np->saddr, saddr);
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
icsk->icsk_ext_hdr_len = 0;
if (np->opt != NULL)
icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
np->opt->opt_nflen);
inet->inet_dport = usin->sin6_port;
dccp_set_state(sk, DCCP_REQUESTING);
err = inet6_hash_connect(&dccp_death_row, sk);
if (err)
goto late_failure;
dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
np->daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
err = dccp_connect(sk);
if (err)
goto late_failure;
return 0;
late_failure:
dccp_set_state(sk, DCCP_CLOSED);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
return err;
}
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
.queue_xmit = inet6_csk_xmit,
.send_check = dccp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
.conn_request = dccp_v6_conn_request,
.syn_recv_sock = dccp_v6_request_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/*
* DCCP over IPv4 via INET6 API
*/
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = dccp_v6_conn_request,
.syn_recv_sock = dccp_v6_request_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
*/
static int dccp_v6_init_sock(struct sock *sk)
{
static __u8 dccp_v6_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v6_ctl_sock_initialized))
dccp_v6_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
}
return err;
}
static void dccp_v6_destroy_sock(struct sock *sk)
{
dccp_destroy_sock(sk);
inet6_destroy_sock(sk);
}
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
static struct proto dccp_v6_prot = {
.name = "DCCPv6",
.owner = THIS_MODULE,
.close = dccp_close,
.connect = dccp_v6_connect,
.disconnect = dccp_disconnect,
.ioctl = dccp_ioctl,
.init = dccp_v6_init_sock,
.setsockopt = dccp_setsockopt,
.getsockopt = dccp_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v6_do_rcv,
.hash = dccp_v6_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_v6_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.rsk_prot = &dccp6_request_sock_ops,
.twsk_prot = &dccp6_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static const struct inet6_protocol dccp_v6_protocol = {
.handler = dccp_v6_rcv,
.err_handler = dccp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
static const struct proto_ops inet6_dccp_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet6_getname,
.poll = dccp_poll,
.ioctl = inet6_ioctl,
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v6_protosw = {
.type = SOCK_DCCP,
.protocol = IPPROTO_DCCP,
.prot = &dccp_v6_prot,
.ops = &inet6_dccp_ops,
.flags = INET_PROTOSW_ICSK,
};
static int __net_init dccp_v6_init_net(struct net *net)
{
if (dccp_hashinfo.bhash == NULL)
return -ESOCKTNOSUPPORT;
return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
SOCK_DCCP, IPPROTO_DCCP, net);
}
static void __net_exit dccp_v6_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
};
static int __init dccp_v6_init(void)
{
int err = proto_register(&dccp_v6_prot, 1);
if (err != 0)
goto out;
err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
if (err != 0)
goto out_unregister_proto;
inet6_register_protosw(&dccp_v6_protosw);
err = register_pernet_subsys(&dccp_v6_ops);
if (err != 0)
goto out_destroy_ctl_sock;
out:
return err;
out_destroy_ctl_sock:
inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
proto_unregister(&dccp_v6_prot);
goto out;
}
static void __exit dccp_v6_exit(void)
{
unregister_pernet_subsys(&dccp_v6_ops);
inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
inet6_unregister_protosw(&dccp_v6_protosw);
proto_unregister(&dccp_v6_prot);
}
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);
/*
* __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
* values directly. Also cover the case where the protocol is not specified,
* i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
|
188,878,254,239,156 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PF_INET protocol family socket handler.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Florian La Roche, <flla@stud.uni-sb.de>
* Alan Cox, <A.Cox@swansea.ac.uk>
*
* Changes (see also sock.c)
*
* piggy,
* Karl Knutson : Socket protocol table
* A.N.Kuznetsov : Socket death error in accept().
* John Richardson : Fix non blocking error in connect()
* so sockets that fail to connect
* don't return -EINPROGRESS.
* Alan Cox : Asynchronous I/O support
* Alan Cox : Keep correct socket pointer on sock
* structures
* when accept() ed
* Alan Cox : Semantics of SO_LINGER aren't state
* moved to close when you look carefully.
* With this fixed and the accept bug fixed
* some RPC stuff seems happier.
* Niibe Yutaka : 4.4BSD style write async I/O
* Alan Cox,
* Tony Gale : Fixed reuse semantics.
* Alan Cox : bind() shouldn't abort existing but dead
* sockets. Stops FTP netin:.. I hope.
* Alan Cox : bind() works correctly for RAW sockets.
* Note that FreeBSD at least was broken
* in this respect so be careful with
* compatibility tests...
* Alan Cox : routing cache support
* Alan Cox : memzero the socket structure for
* compactness.
* Matt Day : nonblock connect error handler
* Alan Cox : Allow large numbers of pending sockets
* (eg for big web sites), but only if
* specifically application requested.
* Alan Cox : New buffering throughout IP. Used
* dumbly.
* Alan Cox : New buffering now used smartly.
* Alan Cox : BSD rather than common sense
* interpretation of listen.
* Germano Caronni : Assorted small races.
* Alan Cox : sendmsg/recvmsg basic support.
* Alan Cox : Only sendmsg/recvmsg now supported.
* Alan Cox : Locked down bind (see security list).
* Alan Cox : Loosened bind a little.
* Mike McLagan : ADD/DEL DLCI Ioctls
* Willy Konynenberg : Transparent proxying support.
* David S. Miller : New socket lookup architecture.
* Some other random speedups.
* Cyrus Durgin : Cleaned up file for kmod hacks.
* Andi Kleen : Fix inet_stream_connect TCP race.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
*/
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
struct ipv4_config ipv4_config;
EXPORT_SYMBOL(ipv4_config);
/* New destruction routine */
void inet_sock_destruct(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
pr_err("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
kfree(inet->opt);
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
/*
* The routines beyond this point handle the behaviour of an AF_INET
* socket object. Mostly it punts to the subprotocols of IP to do
* the work.
*/
/*
* Automatically bind an unbound socket.
*/
static int inet_autobind(struct sock *sk)
{
struct inet_sock *inet;
/* We may need to bind the socket. */
lock_sock(sk);
inet = inet_sk(sk);
if (!inet->inet_num) {
if (sk->sk_prot->get_port(sk, 0)) {
release_sock(sk);
return -EAGAIN;
}
inet->inet_sport = htons(inet->inet_num);
}
release_sock(sk);
return 0;
}
/*
* Move a socket into listening state.
*/
int inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
unsigned char old_state;
int err;
lock_sock(sk);
err = -EINVAL;
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto out;
old_state = sk->sk_state;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
if (old_state != TCP_LISTEN) {
err = inet_csk_listen_start(sk, backlog);
if (err)
goto out;
}
sk->sk_max_ack_backlog = backlog;
err = 0;
out:
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_listen);
u32 inet_ehash_secret __read_mostly;
EXPORT_SYMBOL(inet_ehash_secret);
/*
* inet_ehash_secret must be set exactly once
*/
void build_ehash_secret(void)
{
u32 rnd;
do {
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
cmpxchg(&inet_ehash_secret, 0, rnd);
}
EXPORT_SYMBOL(build_ehash_secret);
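/* Check whether the given transport protocol may be used from a non-initial
 * network namespace. */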
static inline int inet_netns_ok(struct net *net, int protocol)
{
int hash;
const struct net_protocol *ipprot;
if (net_eq(net, &init_net))
return 1;
hash = protocol & (MAX_INET_PROTOS - 1);
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot == NULL)
/* raw IP is OK */
return 1;
return ipprot->netns_ok;
}
/*
* Create an inet socket.
*/
static int inet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct inet_protosw *answer;
struct inet_sock *inet;
struct proto *answer_prot;
unsigned char answer_flags;
char answer_no_check;
int try_loading_module = 0;
int err;
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
err = 0;
/* Check the non-wild match. */
if (protocol == answer->protocol) {
if (protocol != IPPROTO_IP)
break;
} else {
/* Check for the two wild cases. */
if (IPPROTO_IP == protocol) {
protocol = answer->protocol;
break;
}
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
}
if (unlikely(err)) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-2-proto-132-type-1
* (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-2-proto-132
* (net-pf-PF_INET-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}
err = -EPERM;
if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
err = -EAFNOSUPPORT;
if (!inet_netns_ok(net, protocol))
goto out_rcu_unlock;
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_no_check = answer->no_check;
answer_flags = answer->flags;
rcu_read_unlock();
WARN_ON(answer_prot->slab == NULL);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
if (sk == NULL)
goto out;
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = 1;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
inet->nodefrag = 0;
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
if (ipv4_config.no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
inet->inet_id = 0;
sock_init_data(sock, sk);
sk->sk_destruct = inet_sock_destruct;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
inet->uc_ttl = -1;
inet->mc_loop = 1;
inet->mc_ttl = 1;
inet->mc_all = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
sk_refcnt_debug_inc(sk);
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically
* shares that number.
*/
inet->inet_sport = htons(inet->inet_num);
/* Add to protocol hash chains. */
sk->sk_prot->hash(sk);
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
if (err)
sk_common_release(sk);
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
}
/*
* The peer socket should always be NULL (or else). When we call this
* function we are destroying the object and from then on nobody
* should refer to it.
*/
int inet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
long timeout;
sock_rps_reset_flow(sk);
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
/* If linger is set, we don't return until the close
* is complete. Otherwise we return immediately. The
* actual closing is done the same either way.
*
* If the close is due to the process exiting, we never
* linger.
*/
timeout = 0;
if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
sock->sk = NULL;
sk->sk_prot->close(sk, timeout);
}
return 0;
}
EXPORT_SYMBOL(inet_release);
/* It is off by default, see below. */
int sysctl_ip_nonlocal_bind __read_mostly;
EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
unsigned short snum;
int chk_addr_ret;
int err;
/* If the socket has its own bind function then use it. (RAW) */
if (sk->sk_prot->bind) {
err = sk->sk_prot->bind(sk, uaddr, addr_len);
goto out;
}
err = -EINVAL;
if (addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
/* Not specified by any standard per se, however it breaks too
* many applications when removed. It is unfortunate since
* allowing applications to make a non-local bind solves
* several problems with systems using dynamic addressing.
* (i.e. your servers still start up even if your ISDN link
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
if (!sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST)
goto out;
snum = ntohs(addr->sin_port);
err = -EACCES;
if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
goto out;
/* We keep a pair of addresses. rcv_saddr is the one
* used by hash lookups, and saddr is used for transmit.
*
* In the BSD API these are the same except where it
* would be illegal to use them (multicast/broadcast) in
* which case the sending device address is used.
*/
lock_sock(sk);
/* Check these errors (active socket, double bind). */
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE || inet->inet_num)
goto out_release_sock;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
if (sk->sk_prot->get_port(sk, snum)) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
if (inet->inet_rcv_saddr)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->inet_sport = htons(inet->inet_num);
inet->inet_daddr = 0;
inet->inet_dport = 0;
sk_dst_reset(sk);
err = 0;
out_release_sock:
release_sock(sk);
out:
return err;
}
EXPORT_SYMBOL(inet_bind);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC)
return sk->sk_prot->disconnect(sk, flags);
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
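/* Sleep (dropping the socket lock while asleep) until the handshake leaves
 * the SYN_SENT/SYN_RECV states, the timeout expires or a signal is pending. */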
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change the state of the socket from TCP_SYN_*.
* Connect() does not allow one to get error notifications
* without closing the socket.
*/
while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
return timeo;
}
/*
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
int err;
long timeo;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
lock_sock(sk);
if (uaddr->sa_family == AF_UNSPEC) {
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
}
switch (sock->state) {
default:
err = -EINVAL;
goto out;
case SS_CONNECTED:
err = -EISCONN;
goto out;
case SS_CONNECTING:
err = -EALREADY;
/* Fall out of switch with err, set for this state */
break;
case SS_UNCONNECTED:
err = -EISCONN;
if (sk->sk_state != TCP_CLOSE)
goto out;
err = sk->sk_prot->connect(sk, uaddr, addr_len);
if (err < 0)
goto out;
sock->state = SS_CONNECTING;
/* Just entered SS_CONNECTING state; the only
* difference is that the return value in the non-blocking
* case is EINPROGRESS, rather than EALREADY.
*/
err = -EINPROGRESS;
break;
}
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo))
goto out;
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
}
/* Connection was closed by RST, timeout, ICMP error
* or another process disconnected us.
*/
if (sk->sk_state == TCP_CLOSE)
goto sock_error;
/* sk->sk_err may not be zero now, if RECVERR was ordered by the user
* and an error was received after the socket entered the established state.
* Hence, it is handled normally after connect() returns successfully.
*/
sock->state = SS_CONNECTED;
err = 0;
out:
release_sock(sk);
return err;
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
}
EXPORT_SYMBOL(inet_stream_connect);
/*
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
int inet_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
if (!sk2)
goto do_err;
lock_sock(sk2);
WARN_ON(!((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
sock_graft(sk2, newsock);
newsock->state = SS_CONNECTED;
err = 0;
release_sock(sk2);
do_err:
return err;
}
EXPORT_SYMBOL(inet_accept);
/*
* This does both peername and sockname.
*/
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
sin->sin_family = AF_INET;
if (peer) {
if (!inet->inet_dport ||
(((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
peer == 1))
return -ENOTCONN;
sin->sin_port = inet->inet_dport;
sin->sin_addr.s_addr = inet->inet_daddr;
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
sin->sin_port = inet->inet_sport;
sin->sin_addr.s_addr = addr;
}
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*uaddr_len = sizeof(*sin);
return 0;
}
EXPORT_SYMBOL(inet_getname);
int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
if (sk->sk_prot->sendpage)
return sk->sk_prot->sendpage(sk, page, offset, size, flags);
return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);
int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
int addr_len = 0;
int err;
sock_rps_record_flow(sk);
err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
}
EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
/* This should really check to make sure
* the socket is a TCP socket. (WHY AC...)
*/
how++; /* maps 0->1, 1->2, 2->3; this has the advantage of making
bit 1 mean "receives" and bit 2 mean "sends" */
if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
return -EINVAL;
lock_sock(sk);
if (sock->state == SS_CONNECTING) {
if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
switch (sk->sk_state) {
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
POLLHUP, even on e.g. unconnected UDP sockets -- RR */
default:
sk->sk_shutdown |= how;
if (sk->sk_prot->shutdown)
sk->sk_prot->shutdown(sk, how);
break;
/* The remaining two branches are a temporary solution for a missing
* close() in a multithreaded environment. It is _not_ a good idea,
* but we have no choice until close() is repaired at VFS level.
*/
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
/* Fall through */
case TCP_SYN_SENT:
err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
break;
}
/* Wake up anyone sleeping in poll. */
sk->sk_state_change(sk);
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_shutdown);
/*
* ioctl() calls you can issue on an INET socket. Most of these are
* device configuration and the like, and are very rarely used. Some ioctls
* pass on to the socket itself.
*
* NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
* loads the devconfigure module, which does its configuring, and unloads it.
* There's a good 20K of config code hanging around the kernel.
*/
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = 0;
struct net *net = sock_net(sk);
switch (cmd) {
case SIOCGSTAMP:
err = sock_get_timestamp(sk, (struct timeval __user *)arg);
break;
case SIOCGSTAMPNS:
err = sock_get_timestampns(sk, (struct timespec __user *)arg);
break;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRTMSG:
err = ip_rt_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
err = arp_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default:
if (sk->sk_prot->ioctl)
err = sk->sk_prot->ioctl(sk, cmd, arg);
else
err = -ENOIOCTLCMD;
break;
}
return err;
}
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = -ENOIOCTLCMD;
if (sk->sk_prot->compat_ioctl)
err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
return err;
}
#endif
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
.poll = tcp_poll,
.ioctl = inet_ioctl,
.listen = inet_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
const struct proto_ops inet_dgram_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = udp_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
/*
* For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
* udp_poll
*/
static const struct proto_ops inet_sockraw_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
static const struct net_proto_family inet_family_ops = {
.family = PF_INET,
.create = inet_create,
.owner = THIS_MODULE,
};
/* Upon startup we insert all the elements in inetsw_array[] into
* the linked list inetsw.
*/
static struct inet_protosw inetsw_array[] =
{
{
.type = SOCK_STREAM,
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
},
{
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
},
{
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
}
};
#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
void inet_register_protosw(struct inet_protosw *p)
{
struct list_head *lh;
struct inet_protosw *answer;
int protocol = p->protocol;
struct list_head *last_perm;
spin_lock_bh(&inetsw_lock);
if (p->type >= SOCK_MAX)
goto out_illegal;
/* If we are trying to override a permanent protocol, bail. */
answer = NULL;
last_perm = &inetsw[p->type];
list_for_each(lh, &inetsw[p->type]) {
answer = list_entry(lh, struct inet_protosw, list);
/* Check only the non-wild match. */
if (INET_PROTOSW_PERMANENT & answer->flags) {
if (protocol == answer->protocol)
break;
last_perm = lh;
}
answer = NULL;
}
if (answer)
goto out_permanent;
/* Add the new entry after the last permanent entry if any, so that
* the new entry does not override a permanent entry when matched with
* a wild-card protocol. But it is allowed to override any existing
* non-permanent entry. This means that when we remove this entry, the
* system automatically returns to the old behavior.
*/
list_add_rcu(&p->list, last_perm);
out:
spin_unlock_bh(&inetsw_lock);
return;
out_permanent:
printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
protocol);
goto out;
out_illegal:
printk(KERN_ERR
"Ignoring attempt to register invalid socket type %d.\n",
p->type);
goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
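/*
 * Illustrative sketch, not part of the original file: a transport module
 * (SCTP and DCCP do roughly this) fills in its own struct inet_protosw and
 * hands it to inet_register_protosw() from its module init.  foo_prot,
 * inet_foo_ops and the protocol number below are hypothetical placeholders;
 * the fields mirror the inetsw_array[] entries in this file, and the
 * headers already included by this file are assumed.
 */
extern struct proto foo_prot;			/* hypothetical */
extern const struct proto_ops inet_foo_ops;	/* hypothetical */

static struct inet_protosw foo_protosw = {
	.type     = SOCK_STREAM,
	.protocol = 253,	/* hypothetical; 253 is reserved for experiments (RFC 3692) */
	.prot     = &foo_prot,
	.ops      = &inet_foo_ops,
	.no_check = 0,
	.flags    = 0,		/* non-permanent, so a later entry may override it */
};

static int __init foo_init(void)
{
	/* Inserted after the last permanent entry for SOCK_STREAM. */
	inet_register_protosw(&foo_protosw);
	return 0;
}
module_init(foo_init);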
void inet_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
printk(KERN_ERR
"Attempt to unregister permanent protocol %d.\n",
p->protocol);
} else {
spin_lock_bh(&inetsw_lock);
list_del_rcu(&p->list);
spin_unlock_bh(&inetsw_lock);
synchronize_net();
}
}
EXPORT_SYMBOL(inet_unregister_protosw);
/*
* Shall we try to damage output packets if routing dev changes?
*/
int sysctl_ip_dynaddr __read_mostly;
static int inet_sk_reselect_saddr(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
struct flowi4 fl4;
struct rtable *rt;
__be32 new_saddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
/* Query new route. */
rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if, sk->sk_protocol,
inet->inet_sport, inet->inet_dport, sk, false);
if (IS_ERR(rt))
return PTR_ERR(rt);
sk_setup_caps(sk, &rt->dst);
new_saddr = rt->rt_src;
if (new_saddr == old_saddr)
return 0;
if (sysctl_ip_dynaddr > 1) {
printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n",
__func__, &old_saddr, &new_saddr);
}
inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
/*
* XXX The only one ugly spot where we need to
* XXX really change the sockets identity after
* XXX it has entered the hashes. -DaveM
*
* Besides that, it does not check for connection
* uniqueness. Wait for troubles.
*/
__sk_prot_rehash(sk);
return 0;
}
int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
daddr = inet->inet_daddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
* Other protocols have to map their equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!sysctl_ip_dynaddr ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
sk->sk_err_soft = -err;
}
return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
const struct net_protocol *ops;
int proto;
int ihl;
int err = -EINVAL;
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
proto = iph->protocol & (MAX_INET_PROTOS - 1);
err = -EPROTONOSUPPORT;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (likely(ops && ops->gso_send_check))
err = ops->gso_send_check(skb);
rcu_read_unlock();
out:
return err;
}
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
const struct net_protocol *ops;
int proto;
int ihl;
int id;
unsigned int offset = 0;
if (!(features & NETIF_F_V4_CSUM))
features &= ~NETIF_F_SG;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_TCPV4 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
0)))
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
id = ntohs(iph->id);
proto = iph->protocol & (MAX_INET_PROTOS - 1);
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (likely(ops && ops->gso_segment))
segs = ops->gso_segment(skb, features);
rcu_read_unlock();
if (!segs || IS_ERR(segs))
goto out;
skb = segs;
do {
iph = ip_hdr(skb);
if (proto == IPPROTO_UDP) {
iph->id = htons(id);
iph->frag_off = htons(offset >> 3);
if (skb->next != NULL)
iph->frag_off |= htons(IP_MF);
offset += (skb->len - skb->mac_len - iph->ihl * 4);
} else
iph->id = htons(id++);
iph->tot_len = htons(skb->len - skb->mac_len);
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
} while ((skb = skb->next));
out:
return segs;
}
static struct sk_buff **inet_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
const struct net_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
const struct iphdr *iph;
unsigned int hlen;
unsigned int off;
unsigned int id;
int flush = 1;
int proto;
off = skb_gro_offset(skb);
hlen = off + sizeof(*iph);
iph = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
iph = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!iph))
goto out;
}
proto = iph->protocol & (MAX_INET_PROTOS - 1);
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (!ops || !ops->gro_receive)
goto out_unlock;
if (*(u8 *)iph != 0x45)
goto out_unlock;
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto out_unlock;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
id >>= 16;
for (p = *head; p; p = p->next) {
struct iphdr *iph2;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
iph2 = ip_hdr(p);
if ((iph->protocol ^ iph2->protocol) |
(iph->tos ^ iph2->tos) |
((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
/* All fields must match except length and checksum. */
NAPI_GRO_CB(p)->flush |=
(iph->ttl ^ iph2->ttl) |
((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
NAPI_GRO_CB(p)->flush |= flush;
}
NAPI_GRO_CB(skb)->flush |= flush;
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
pp = ops->gro_receive(head, skb);
out_unlock:
rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
static int inet_gro_complete(struct sk_buff *skb)
{
const struct net_protocol *ops;
struct iphdr *iph = ip_hdr(skb);
int proto = iph->protocol & (MAX_INET_PROTOS - 1);
int err = -ENOSYS;
__be16 newlen = htons(skb->len - skb_network_offset(skb));
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (WARN_ON(!ops || !ops->gro_complete))
goto out_unlock;
err = ops->gro_complete(skb);
out_unlock:
rcu_read_unlock();
return err;
}
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net)
{
struct socket *sock;
int rc = sock_create_kern(family, type, protocol, &sock);
if (rc == 0) {
*sk = sock->sk;
(*sk)->sk_allocation = GFP_ATOMIC;
/*
* Unhash it so that IP input processing does not even see it,
* we do not wish this socket to see incoming packets.
*/
(*sk)->sk_prot->unhash(*sk);
sk_change_net(*sk, net);
}
return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
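/*
 * Illustrative sketch, not part of the original file: a typical caller (the
 * ICMP layer does roughly this per network namespace) creates a
 * kernel-internal control socket and later tears it down again from its
 * pernet exit hook with inet_ctl_sock_destroy().  example_ctl_sock_init is
 * a hypothetical name.
 */
static int example_ctl_sock_init(struct net *net, struct sock **skp)
{
	int err;

	err = inet_ctl_sock_create(skp, PF_INET, SOCK_RAW, IPPROTO_ICMP, net);
	if (err < 0)
		return err;
	/* *skp is unhashed and bound to @net: it is only used to emit
	 * kernel-generated packets and never sees incoming traffic. */
	return 0;
}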
unsigned long snmp_fold_field(void __percpu *mib[], int offt)
{
unsigned long res = 0;
int i;
for_each_possible_cpu(i) {
res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
}
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
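/*
 * Illustrative sketch, not part of the original file: /proc/net/snmp style
 * readers sum one counter across both per-cpu mibs like this.
 * TCP_MIB_ACTIVEOPENS is just an example field index; any counter stored as
 * an unsigned long in the mib is read the same way.
 */
static unsigned long example_active_opens(struct net *net)
{
	return snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
			       TCP_MIB_ACTIVEOPENS);
}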
#if BITS_PER_LONG==32
u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
{
u64 res = 0;
int cpu;
for_each_possible_cpu(cpu) {
void *bhptr, *userptr;
struct u64_stats_sync *syncp;
u64 v_bh, v_user;
unsigned int start;
/* first mib used by softirq context, we must use _bh() accessors */
bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
start = u64_stats_fetch_begin_bh(syncp);
v_bh = *(((u64 *) bhptr) + offt);
} while (u64_stats_fetch_retry_bh(syncp, start));
/* second mib used in USER context */
userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
do {
start = u64_stats_fetch_begin(syncp);
v_user = *(((u64 *) userptr) + offt);
} while (u64_stats_fetch_retry(syncp, start));
res += v_bh + v_user;
}
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
BUG_ON(ptr == NULL);
ptr[0] = __alloc_percpu(mibsize, align);
if (!ptr[0])
goto err0;
ptr[1] = __alloc_percpu(mibsize, align);
if (!ptr[1])
goto err1;
return 0;
err1:
free_percpu(ptr[0]);
ptr[0] = NULL;
err0:
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
void snmp_mib_free(void __percpu *ptr[2])
{
BUG_ON(ptr == NULL);
free_percpu(ptr[0]);
free_percpu(ptr[1]);
ptr[0] = ptr[1] = NULL;
}
EXPORT_SYMBOL_GPL(snmp_mib_free);
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
.netns_ok = 1,
};
#endif
static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
.no_policy = 1,
.netns_ok = 1,
};
static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.gso_send_check = udp4_ufo_send_check,
.gso_segment = udp4_ufo_fragment,
.no_policy = 1,
.netns_ok = 1,
};
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.no_policy = 1,
.netns_ok = 1,
};
static __net_init int ipv4_mib_init_net(struct net *net)
{
if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib),
__alignof__(struct tcp_mib)) < 0)
goto err_tcp_mib;
if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib),
__alignof__(struct linux_mib)) < 0)
goto err_net_mib;
if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
goto err_udp_mib;
if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
goto err_udplite_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
sizeof(struct icmp_mib),
__alignof__(struct icmp_mib)) < 0)
goto err_icmp_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
sizeof(struct icmpmsg_mib),
__alignof__(struct icmpmsg_mib)) < 0)
goto err_icmpmsg_mib;
tcp_mib_init(net);
return 0;
err_icmpmsg_mib:
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
err_icmp_mib:
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
err_udplite_mib:
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
err_udp_mib:
snmp_mib_free((void __percpu **)net->mib.net_statistics);
err_net_mib:
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
err_ip_mib:
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
snmp_mib_free((void __percpu **)net->mib.net_statistics);
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
};
static int __init init_ipv4_mibs(void)
{
return register_pernet_subsys(&ipv4_mib_ops);
}
static int ipv4_proc_init(void);
/*
* IP protocol layer initialiser
*/
static struct packet_type ip_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.func = ip_rcv,
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
};
static int __init inet_init(void)
{
struct sk_buff *dummy_skb;
struct inet_protosw *q;
struct list_head *r;
int rc = -EINVAL;
BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
if (!sysctl_local_reserved_ports)
goto out;
rc = proto_register(&tcp_prot, 1);
if (rc)
goto out_free_reserved_ports;
rc = proto_register(&udp_prot, 1);
if (rc)
goto out_unregister_tcp_proto;
rc = proto_register(&raw_prot, 1);
if (rc)
goto out_unregister_udp_proto;
/*
* Tell SOCKET that we are alive...
*/
(void)sock_register(&inet_family_ops);
#ifdef CONFIG_SYSCTL
ip_static_sysctl_init();
#endif
/*
* Add all the base protocols.
*/
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n");
if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n");
if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n");
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n");
#endif
/* Register the socket-side information for inet_create. */
for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
inet_register_protosw(q);
/*
* Set the ARP module up
*/
arp_init();
/*
* Set the IP module up
*/
ip_init();
tcp_v4_init();
/* Setup TCP slab cache for open requests. */
tcp_init();
/* Setup UDP memory threshold */
udp_init();
/* Add UDP-Lite (RFC 3828) */
udplite4_register();
/*
* Set the ICMP layer up
*/
if (icmp_init() < 0)
panic("Failed to create the ICMP control socket.\n");
/*
* Initialise the multicast router
*/
#if defined(CONFIG_IP_MROUTE)
if (ip_mr_init())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n");
#endif
/*
* Initialise per-cpu ipv4 mibs
*/
if (init_ipv4_mibs())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
ipv4_proc_init();
ipfrag_init();
dev_add_pack(&ip_packet_type);
rc = 0;
out:
return rc;
out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
out_free_reserved_ports:
kfree(sysctl_local_reserved_ports);
goto out;
}
fs_initcall(inet_init);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
int rc = 0;
if (raw_proc_init())
goto out_raw;
if (tcp4_proc_init())
goto out_tcp;
if (udp4_proc_init())
goto out_udp;
if (ip_misc_proc_init())
goto out_misc;
out:
return rc;
out_misc:
udp4_proc_exit();
out_udp:
tcp4_proc_exit();
out_tcp:
raw_proc_exit();
out_raw:
rc = -ENOMEM;
goto out;
}
#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
MODULE_ALIAS_NETPROTO(PF_INET);
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PF_INET protocol family socket handler.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Florian La Roche, <flla@stud.uni-sb.de>
* Alan Cox, <A.Cox@swansea.ac.uk>
*
* Changes (see also sock.c)
*
* piggy,
* Karl Knutson : Socket protocol table
* A.N.Kuznetsov : Socket death error in accept().
* John Richardson : Fix non blocking error in connect()
* so sockets that fail to connect
* don't return -EINPROGRESS.
* Alan Cox : Asynchronous I/O support
* Alan Cox : Keep correct socket pointer on sock
* structures
* when accept() ed
* Alan Cox : Semantics of SO_LINGER aren't state
* moved to close when you look carefully.
* With this fixed and the accept bug fixed
* some RPC stuff seems happier.
* Niibe Yutaka : 4.4BSD style write async I/O
* Alan Cox,
* Tony Gale : Fixed reuse semantics.
* Alan Cox : bind() shouldn't abort existing but dead
* sockets. Stops FTP netin:.. I hope.
* Alan Cox : bind() works correctly for RAW sockets.
* Note that FreeBSD at least was broken
* in this respect so be careful with
* compatibility tests...
* Alan Cox : routing cache support
* Alan Cox : memzero the socket structure for
* compactness.
* Matt Day : nonblock connect error handler
* Alan Cox : Allow large numbers of pending sockets
* (eg for big web sites), but only if
* specifically application requested.
* Alan Cox : New buffering throughout IP. Used
* dumbly.
* Alan Cox : New buffering now used smartly.
* Alan Cox : BSD rather than common sense
* interpretation of listen.
* Germano Caronni : Assorted small races.
* Alan Cox : sendmsg/recvmsg basic support.
* Alan Cox : Only sendmsg/recvmsg now supported.
* Alan Cox : Locked down bind (see security list).
* Alan Cox : Loosened bind a little.
* Mike McLagan : ADD/DEL DLCI Ioctls
* Willy Konynenberg : Transparent proxying support.
* David S. Miller : New socket lookup architecture.
* Some other random speedups.
* Cyrus Durgin : Cleaned up file for kmod hacks.
* Andi Kleen : Fix inet_stream_connect TCP race.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/ipip.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
*/
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
struct ipv4_config ipv4_config;
EXPORT_SYMBOL(ipv4_config);
/* New destruction routine */
void inet_sock_destruct(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
pr_err("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
/*
* The routines beyond this point handle the behaviour of an AF_INET
* socket object. Mostly it punts to the subprotocols of IP to do
* the work.
*/
/*
* Automatically bind an unbound socket.
*/
static int inet_autobind(struct sock *sk)
{
struct inet_sock *inet;
/* We may need to bind the socket. */
lock_sock(sk);
inet = inet_sk(sk);
if (!inet->inet_num) {
if (sk->sk_prot->get_port(sk, 0)) {
release_sock(sk);
return -EAGAIN;
}
inet->inet_sport = htons(inet->inet_num);
}
release_sock(sk);
return 0;
}
/*
* Move a socket into listening state.
*/
int inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
unsigned char old_state;
int err;
lock_sock(sk);
err = -EINVAL;
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto out;
old_state = sk->sk_state;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
if (old_state != TCP_LISTEN) {
err = inet_csk_listen_start(sk, backlog);
if (err)
goto out;
}
sk->sk_max_ack_backlog = backlog;
err = 0;
out:
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_listen);
u32 inet_ehash_secret __read_mostly;
EXPORT_SYMBOL(inet_ehash_secret);
/*
* inet_ehash_secret must be set exactly once
*/
void build_ehash_secret(void)
{
u32 rnd;
do {
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
cmpxchg(&inet_ehash_secret, 0, rnd);
}
EXPORT_SYMBOL(build_ehash_secret);
static inline int inet_netns_ok(struct net *net, int protocol)
{
int hash;
const struct net_protocol *ipprot;
if (net_eq(net, &init_net))
return 1;
hash = protocol & (MAX_INET_PROTOS - 1);
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot == NULL)
/* raw IP is OK */
return 1;
return ipprot->netns_ok;
}
/*
* Create an inet socket.
*/
static int inet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct inet_protosw *answer;
struct inet_sock *inet;
struct proto *answer_prot;
unsigned char answer_flags;
char answer_no_check;
int try_loading_module = 0;
int err;
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
sock->state = SS_UNCONNECTED;
/* Look for the requested type/protocol pair. */
lookup_protocol:
err = -ESOCKTNOSUPPORT;
rcu_read_lock();
list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
err = 0;
/* Check the non-wild match. */
if (protocol == answer->protocol) {
if (protocol != IPPROTO_IP)
break;
} else {
/* Check for the two wild cases. */
if (IPPROTO_IP == protocol) {
protocol = answer->protocol;
break;
}
if (IPPROTO_IP == answer->protocol)
break;
}
err = -EPROTONOSUPPORT;
}
if (unlikely(err)) {
if (try_loading_module < 2) {
rcu_read_unlock();
/*
* Be more specific, e.g. net-pf-2-proto-132-type-1
* (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
*/
if (++try_loading_module == 1)
request_module("net-pf-%d-proto-%d-type-%d",
PF_INET, protocol, sock->type);
/*
* Fall back to generic, e.g. net-pf-2-proto-132
* (net-pf-PF_INET-proto-IPPROTO_SCTP)
*/
else
request_module("net-pf-%d-proto-%d",
PF_INET, protocol);
goto lookup_protocol;
} else
goto out_rcu_unlock;
}
err = -EPERM;
if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
err = -EAFNOSUPPORT;
if (!inet_netns_ok(net, protocol))
goto out_rcu_unlock;
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_no_check = answer->no_check;
answer_flags = answer->flags;
rcu_read_unlock();
WARN_ON(answer_prot->slab == NULL);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
if (sk == NULL)
goto out;
err = 0;
sk->sk_no_check = answer_no_check;
if (INET_PROTOSW_REUSE & answer_flags)
sk->sk_reuse = 1;
inet = inet_sk(sk);
inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
inet->nodefrag = 0;
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
inet->hdrincl = 1;
}
if (ipv4_config.no_pmtu_disc)
inet->pmtudisc = IP_PMTUDISC_DONT;
else
inet->pmtudisc = IP_PMTUDISC_WANT;
inet->inet_id = 0;
sock_init_data(sock, sk);
sk->sk_destruct = inet_sock_destruct;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
inet->uc_ttl = -1;
inet->mc_loop = 1;
inet->mc_ttl = 1;
inet->mc_all = 1;
inet->mc_index = 0;
inet->mc_list = NULL;
sk_refcnt_debug_inc(sk);
if (inet->inet_num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
* creation time automatically
* shares.
*/
inet->inet_sport = htons(inet->inet_num);
/* Add to protocol hash chains. */
sk->sk_prot->hash(sk);
}
if (sk->sk_prot->init) {
err = sk->sk_prot->init(sk);
if (err)
sk_common_release(sk);
}
out:
return err;
out_rcu_unlock:
rcu_read_unlock();
goto out;
}
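/*
 * Illustrative note, not part of the original file: the lookup loop above
 * resolves wild cards against inetsw_array[].  For example
 *
 *	socket(AF_INET, SOCK_STREAM, 0)
 *
 * reaches inet_create() with protocol == IPPROTO_IP (0), matches the
 * SOCK_STREAM entry and has protocol rewritten to IPPROTO_TCP, while
 *
 *	socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)
 *
 * matches the SOCK_RAW entry whose .protocol is the wild card IPPROTO_IP
 * and keeps IPPROTO_ICMP as the socket's protocol.
 */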
/*
* The peer socket should always be NULL (or else). When we call this
* function we are destroying the object and from then on nobody
* should refer to it.
*/
int inet_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (sk) {
long timeout;
sock_rps_reset_flow(sk);
/* Applications forget to leave groups before exiting */
ip_mc_drop_socket(sk);
/* If linger is set, we don't return until the close
* is complete. Otherwise we return immediately. The
* actually closing is done the same either way.
*
* If the close is due to the process exiting, we never
* linger..
*/
timeout = 0;
if (sock_flag(sk, SOCK_LINGER) &&
!(current->flags & PF_EXITING))
timeout = sk->sk_lingertime;
sock->sk = NULL;
sk->sk_prot->close(sk, timeout);
}
return 0;
}
EXPORT_SYMBOL(inet_release);
/* It is off by default, see below. */
int sysctl_ip_nonlocal_bind __read_mostly;
EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
unsigned short snum;
int chk_addr_ret;
int err;
/* If the socket has its own bind function then use it. (RAW) */
if (sk->sk_prot->bind) {
err = sk->sk_prot->bind(sk, uaddr, addr_len);
goto out;
}
err = -EINVAL;
if (addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
/* Not specified by any standard per se; however, it breaks too
* many applications when removed. It is unfortunate since
* allowing applications to make a non-local bind solves
* several problems with systems using dynamic addressing.
* (i.e. your servers still start up even if your ISDN link
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
if (!sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST)
goto out;
snum = ntohs(addr->sin_port);
err = -EACCES;
if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
goto out;
/* We keep a pair of addresses. rcv_saddr is the one
* used by hash lookups, and saddr is used for transmit.
*
* In the BSD API these are the same except where it
* would be illegal to use them (multicast/broadcast) in
* which case the sending device address is used.
*/
lock_sock(sk);
/* Check these errors (active socket, double bind). */
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE || inet->inet_num)
goto out_release_sock;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
if (sk->sk_prot->get_port(sk, snum)) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
}
if (inet->inet_rcv_saddr)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->inet_sport = htons(inet->inet_num);
inet->inet_daddr = 0;
inet->inet_dport = 0;
sk_dst_reset(sk);
err = 0;
out_release_sock:
release_sock(sk);
out:
return err;
}
EXPORT_SYMBOL(inet_bind);
int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC)
return sk->sk_prot->disconnect(sk, flags);
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
* Connect() does not allow to get error notifications
* without closing the socket.
*/
while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (signal_pending(current) || !timeo)
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
return timeo;
}
/*
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
int err;
long timeo;
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
lock_sock(sk);
if (uaddr->sa_family == AF_UNSPEC) {
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
}
switch (sock->state) {
default:
err = -EINVAL;
goto out;
case SS_CONNECTED:
err = -EISCONN;
goto out;
case SS_CONNECTING:
err = -EALREADY;
/* Fall out of switch with err, set for this state */
break;
case SS_UNCONNECTED:
err = -EISCONN;
if (sk->sk_state != TCP_CLOSE)
goto out;
err = sk->sk_prot->connect(sk, uaddr, addr_len);
if (err < 0)
goto out;
sock->state = SS_CONNECTING;
/* Just entered SS_CONNECTING state; the only
* difference is that return value in non-blocking
* case is EINPROGRESS, rather than EALREADY.
*/
err = -EINPROGRESS;
break;
}
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo))
goto out;
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
}
/* Connection was closed by RST, timeout, ICMP error
* or another process disconnected us.
*/
if (sk->sk_state == TCP_CLOSE)
goto sock_error;
/* sk->sk_err may be non-zero now, if RECVERR was ordered by the user
* and an error was received after the socket entered the established state.
* Hence, it is handled normally after connect() returns successfully.
*/
sock->state = SS_CONNECTED;
err = 0;
out:
release_sock(sk);
return err;
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
}
EXPORT_SYMBOL(inet_stream_connect);
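/*
 * Illustrative note, not part of the original file: the SS_CONNECTING
 * handling above is what produces the classic non-blocking connect pattern
 * seen from userspace (hypothetical fd, error handling omitted):
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, addr, addrlen) < 0 && errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		poll(&pfd, 1, -1);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	}
 *
 * A second connect() while the handshake is still in flight returns
 * EALREADY, and a connect() after success returns EISCONN, matching the
 * switch cases above.
 */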
/*
* Accept a pending connection. The TCP layer now gives BSD semantics.
*/
int inet_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk1 = sock->sk;
int err = -EINVAL;
struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
if (!sk2)
goto do_err;
lock_sock(sk2);
WARN_ON(!((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
sock_graft(sk2, newsock);
newsock->state = SS_CONNECTED;
err = 0;
release_sock(sk2);
do_err:
return err;
}
EXPORT_SYMBOL(inet_accept);
/*
* This does both peername and sockname.
*/
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
sin->sin_family = AF_INET;
if (peer) {
if (!inet->inet_dport ||
(((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
peer == 1))
return -ENOTCONN;
sin->sin_port = inet->inet_dport;
sin->sin_addr.s_addr = inet->inet_daddr;
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
sin->sin_port = inet->inet_sport;
sin->sin_addr.s_addr = addr;
}
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*uaddr_len = sizeof(*sin);
return 0;
}
EXPORT_SYMBOL(inet_getname);
int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);
ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* We may need to bind the socket. */
if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
inet_autobind(sk))
return -EAGAIN;
if (sk->sk_prot->sendpage)
return sk->sk_prot->sendpage(sk, page, offset, size, flags);
return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);
int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
int addr_len = 0;
int err;
sock_rps_record_flow(sk);
err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
if (err >= 0)
msg->msg_namelen = addr_len;
return err;
}
EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
/* This should really check to make sure
* the socket is a TCP socket. (WHY AC...)
*/
how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
1->2 bit 2 snds.
2->3 */
if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
return -EINVAL;
lock_sock(sk);
if (sock->state == SS_CONNECTING) {
if ((1 << sk->sk_state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING;
else
sock->state = SS_CONNECTED;
}
switch (sk->sk_state) {
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
POLLHUP, even on eg. unconnected UDP sockets -- RR */
default:
sk->sk_shutdown |= how;
if (sk->sk_prot->shutdown)
sk->sk_prot->shutdown(sk, how);
break;
/* Remaining two branches are temporary solution for missing
* close() in multithreaded environment. It is _not_ a good idea,
* but we have no choice until close() is repaired at VFS level.
*/
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
/* Fall through */
case TCP_SYN_SENT:
err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
break;
}
/* Wake up anyone sleeping in poll. */
sk->sk_state_change(sk);
release_sock(sk);
return err;
}
EXPORT_SYMBOL(inet_shutdown);
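/*
 * Illustrative note, not part of the original file: the "how++" above maps
 * the userspace shutdown() argument onto the kernel's shutdown bits:
 *
 *	SHUT_RD   (0) -> 1 == RCV_SHUTDOWN
 *	SHUT_WR   (1) -> 2 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> 3 == RCV_SHUTDOWN | SEND_SHUTDOWN
 *
 * so ((how & ~SHUTDOWN_MASK) || !how) rejects everything outside 1..3.
 */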
/*
* ioctl() calls you can issue on an INET socket. Most of these are
* device configuration and stuff and very rarely used. Some ioctls
* pass on to the socket itself.
*
* NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
* loads the devconfigure module, which does its configuring and then unloads it.
* There's a good 20K of config code hanging around the kernel.
*/
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = 0;
struct net *net = sock_net(sk);
switch (cmd) {
case SIOCGSTAMP:
err = sock_get_timestamp(sk, (struct timeval __user *)arg);
break;
case SIOCGSTAMPNS:
err = sock_get_timestampns(sk, (struct timespec __user *)arg);
break;
case SIOCADDRT:
case SIOCDELRT:
case SIOCRTMSG:
err = ip_rt_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
err = arp_ioctl(net, cmd, (void __user *)arg);
break;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default:
if (sk->sk_prot->ioctl)
err = sk->sk_prot->ioctl(sk, cmd, arg);
else
err = -ENOIOCTLCMD;
break;
}
return err;
}
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = -ENOIOCTLCMD;
if (sk->sk_prot->compat_ioctl)
err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
return err;
}
#endif
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
.poll = tcp_poll,
.ioctl = inet_ioctl,
.listen = inet_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
.splice_read = tcp_splice_read,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_stream_ops);
const struct proto_ops inet_dgram_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = udp_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);
/*
* For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
* udp_poll
*/
static const struct proto_ops inet_sockraw_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = inet_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
.compat_ioctl = inet_compat_ioctl,
#endif
};
static const struct net_proto_family inet_family_ops = {
.family = PF_INET,
.create = inet_create,
.owner = THIS_MODULE,
};
/* Upon startup we insert all the elements in inetsw_array[] into
* the linked list inetsw.
*/
static struct inet_protosw inetsw_array[] =
{
{
.type = SOCK_STREAM,
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
},
{
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
},
{
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
}
};
#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
void inet_register_protosw(struct inet_protosw *p)
{
struct list_head *lh;
struct inet_protosw *answer;
int protocol = p->protocol;
struct list_head *last_perm;
spin_lock_bh(&inetsw_lock);
if (p->type >= SOCK_MAX)
goto out_illegal;
/* If we are trying to override a permanent protocol, bail. */
answer = NULL;
last_perm = &inetsw[p->type];
list_for_each(lh, &inetsw[p->type]) {
answer = list_entry(lh, struct inet_protosw, list);
/* Check only the non-wild match. */
if (INET_PROTOSW_PERMANENT & answer->flags) {
if (protocol == answer->protocol)
break;
last_perm = lh;
}
answer = NULL;
}
if (answer)
goto out_permanent;
/* Add the new entry after the last permanent entry if any, so that
* the new entry does not override a permanent entry when matched with
* a wild-card protocol. But it is allowed to override any existing
* non-permanent entry. This means that when we remove this entry, the
* system automatically returns to the old behavior.
*/
list_add_rcu(&p->list, last_perm);
out:
spin_unlock_bh(&inetsw_lock);
return;
out_permanent:
printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
protocol);
goto out;
out_illegal:
printk(KERN_ERR
"Ignoring attempt to register invalid socket type %d.\n",
p->type);
goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
void inet_unregister_protosw(struct inet_protosw *p)
{
if (INET_PROTOSW_PERMANENT & p->flags) {
printk(KERN_ERR
"Attempt to unregister permanent protocol %d.\n",
p->protocol);
} else {
spin_lock_bh(&inetsw_lock);
list_del_rcu(&p->list);
spin_unlock_bh(&inetsw_lock);
synchronize_net();
}
}
EXPORT_SYMBOL(inet_unregister_protosw);
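/*
 * Illustrative sketch, not part of the original file: the matching unload
 * path for a module that registered a non-permanent entry with
 * inet_register_protosw().  The synchronize_net() above guarantees that,
 * once this returns, no RCU reader still walks the removed entry.
 * foo_protosw and foo_exit are hypothetical names.
 */
extern struct inet_protosw foo_protosw;	/* hypothetical, registered at module init */

static void __exit foo_exit(void)
{
	inet_unregister_protosw(&foo_protosw);
}
module_exit(foo_exit);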
/*
* Shall we try to damage output packets if routing dev changes?
*/
int sysctl_ip_dynaddr __read_mostly;
static int inet_sk_reselect_saddr(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
struct flowi4 fl4;
struct rtable *rt;
__be32 new_saddr;
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* Query new route. */
rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if, sk->sk_protocol,
inet->inet_sport, inet->inet_dport, sk, false);
if (IS_ERR(rt))
return PTR_ERR(rt);
sk_setup_caps(sk, &rt->dst);
new_saddr = rt->rt_src;
if (new_saddr == old_saddr)
return 0;
if (sysctl_ip_dynaddr > 1) {
printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n",
__func__, &old_saddr, &new_saddr);
}
inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
/*
* XXX The only one ugly spot where we need to
* XXX really change the sockets identity after
* XXX it has entered the hashes. -DaveM
*
* Besides that, it does not check for connection
* uniqueness. Wait for troubles.
*/
__sk_prot_rehash(sk);
return 0;
}
int inet_sk_rebuild_header(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
struct ip_options_rcu *inet_opt;
int err;
/* Route is OK, nothing to do. */
if (rt)
return 0;
/* Reroute. */
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
daddr = inet->inet_daddr;
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
rcu_read_unlock();
rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (!IS_ERR(rt)) {
err = 0;
sk_setup_caps(sk, &rt->dst);
} else {
err = PTR_ERR(rt);
/* Routing failed... */
sk->sk_route_caps = 0;
/*
* Other protocols have to map their equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
*/
if (!sysctl_ip_dynaddr ||
sk->sk_state != TCP_SYN_SENT ||
(sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
(err = inet_sk_reselect_saddr(sk)) != 0)
sk->sk_err_soft = -err;
}
return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
const struct iphdr *iph;
const struct net_protocol *ops;
int proto;
int ihl;
int err = -EINVAL;
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
proto = iph->protocol & (MAX_INET_PROTOS - 1);
err = -EPROTONOSUPPORT;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (likely(ops && ops->gso_send_check))
err = ops->gso_send_check(skb);
rcu_read_unlock();
out:
return err;
}
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
const struct net_protocol *ops;
int proto;
int ihl;
int id;
unsigned int offset = 0;
if (!(features & NETIF_F_V4_CSUM))
features &= ~NETIF_F_SG;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_TCPV4 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
SKB_GSO_TCP_ECN |
0)))
goto out;
if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
goto out;
iph = ip_hdr(skb);
ihl = iph->ihl * 4;
if (ihl < sizeof(*iph))
goto out;
if (unlikely(!pskb_may_pull(skb, ihl)))
goto out;
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
id = ntohs(iph->id);
proto = iph->protocol & (MAX_INET_PROTOS - 1);
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (likely(ops && ops->gso_segment))
segs = ops->gso_segment(skb, features);
rcu_read_unlock();
if (!segs || IS_ERR(segs))
goto out;
skb = segs;
do {
iph = ip_hdr(skb);
if (proto == IPPROTO_UDP) {
iph->id = htons(id);
iph->frag_off = htons(offset >> 3);
if (skb->next != NULL)
iph->frag_off |= htons(IP_MF);
offset += (skb->len - skb->mac_len - iph->ihl * 4);
} else
iph->id = htons(id++);
iph->tot_len = htons(skb->len - skb->mac_len);
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
} while ((skb = skb->next));
out:
return segs;
}
static struct sk_buff **inet_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
const struct net_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
const struct iphdr *iph;
unsigned int hlen;
unsigned int off;
unsigned int id;
int flush = 1;
int proto;
off = skb_gro_offset(skb);
hlen = off + sizeof(*iph);
iph = skb_gro_header_fast(skb, off);
if (skb_gro_header_hard(skb, hlen)) {
iph = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!iph))
goto out;
}
proto = iph->protocol & (MAX_INET_PROTOS - 1);
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (!ops || !ops->gro_receive)
goto out_unlock;
if (*(u8 *)iph != 0x45)
goto out_unlock;
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto out_unlock;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
id >>= 16;
for (p = *head; p; p = p->next) {
struct iphdr *iph2;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
iph2 = ip_hdr(p);
if ((iph->protocol ^ iph2->protocol) |
(iph->tos ^ iph2->tos) |
((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
/* All fields must match except length and checksum. */
NAPI_GRO_CB(p)->flush |=
(iph->ttl ^ iph2->ttl) |
((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
NAPI_GRO_CB(p)->flush |= flush;
}
NAPI_GRO_CB(skb)->flush |= flush;
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
pp = ops->gro_receive(head, skb);
out_unlock:
rcu_read_unlock();
out:
NAPI_GRO_CB(skb)->flush |= flush;
return pp;
}
static int inet_gro_complete(struct sk_buff *skb)
{
const struct net_protocol *ops;
struct iphdr *iph = ip_hdr(skb);
int proto = iph->protocol & (MAX_INET_PROTOS - 1);
int err = -ENOSYS;
__be16 newlen = htons(skb->len - skb_network_offset(skb));
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
if (WARN_ON(!ops || !ops->gro_complete))
goto out_unlock;
err = ops->gro_complete(skb);
out_unlock:
rcu_read_unlock();
return err;
}
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
unsigned short type, unsigned char protocol,
struct net *net)
{
struct socket *sock;
int rc = sock_create_kern(family, type, protocol, &sock);
if (rc == 0) {
*sk = sock->sk;
(*sk)->sk_allocation = GFP_ATOMIC;
/*
* Unhash it so that IP input processing does not even see it,
* we do not wish this socket to see incoming packets.
*/
(*sk)->sk_prot->unhash(*sk);
sk_change_net(*sk, net);
}
return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
unsigned long snmp_fold_field(void __percpu *mib[], int offt)
{
unsigned long res = 0;
int i;
for_each_possible_cpu(i) {
res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
}
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
#if BITS_PER_LONG==32
u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
{
u64 res = 0;
int cpu;
for_each_possible_cpu(cpu) {
void *bhptr, *userptr;
struct u64_stats_sync *syncp;
u64 v_bh, v_user;
unsigned int start;
/* first mib used by softirq context, we must use _bh() accessors */
bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
start = u64_stats_fetch_begin_bh(syncp);
v_bh = *(((u64 *) bhptr) + offt);
} while (u64_stats_fetch_retry_bh(syncp, start));
/* second mib used in USER context */
userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
do {
start = u64_stats_fetch_begin(syncp);
v_user = *(((u64 *) userptr) + offt);
} while (u64_stats_fetch_retry(syncp, start));
res += v_bh + v_user;
}
return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif
int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
{
BUG_ON(ptr == NULL);
ptr[0] = __alloc_percpu(mibsize, align);
if (!ptr[0])
goto err0;
ptr[1] = __alloc_percpu(mibsize, align);
if (!ptr[1])
goto err1;
return 0;
err1:
free_percpu(ptr[0]);
ptr[0] = NULL;
err0:
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
void snmp_mib_free(void __percpu *ptr[2])
{
BUG_ON(ptr == NULL);
free_percpu(ptr[0]);
free_percpu(ptr[1]);
ptr[0] = ptr[1] = NULL;
}
EXPORT_SYMBOL_GPL(snmp_mib_free);
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
.netns_ok = 1,
};
#endif
static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
.no_policy = 1,
.netns_ok = 1,
};
static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
.gso_send_check = udp4_ufo_send_check,
.gso_segment = udp4_ufo_fragment,
.no_policy = 1,
.netns_ok = 1,
};
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.no_policy = 1,
.netns_ok = 1,
};
static __net_init int ipv4_mib_init_net(struct net *net)
{
if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib),
__alignof__(struct tcp_mib)) < 0)
goto err_tcp_mib;
if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
sizeof(struct ipstats_mib),
__alignof__(struct ipstats_mib)) < 0)
goto err_ip_mib;
if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib),
__alignof__(struct linux_mib)) < 0)
goto err_net_mib;
if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
goto err_udp_mib;
if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
sizeof(struct udp_mib),
__alignof__(struct udp_mib)) < 0)
goto err_udplite_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
sizeof(struct icmp_mib),
__alignof__(struct icmp_mib)) < 0)
goto err_icmp_mib;
if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
sizeof(struct icmpmsg_mib),
__alignof__(struct icmpmsg_mib)) < 0)
goto err_icmpmsg_mib;
tcp_mib_init(net);
return 0;
err_icmpmsg_mib:
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
err_icmp_mib:
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
err_udplite_mib:
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
err_udp_mib:
snmp_mib_free((void __percpu **)net->mib.net_statistics);
err_net_mib:
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
err_ip_mib:
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
snmp_mib_free((void __percpu **)net->mib.udp_statistics);
snmp_mib_free((void __percpu **)net->mib.net_statistics);
snmp_mib_free((void __percpu **)net->mib.ip_statistics);
snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
};
static int __init init_ipv4_mibs(void)
{
return register_pernet_subsys(&ipv4_mib_ops);
}
static int ipv4_proc_init(void);
/*
* IP protocol layer initialiser
*/
static struct packet_type ip_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.func = ip_rcv,
.gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
};
static int __init inet_init(void)
{
struct sk_buff *dummy_skb;
struct inet_protosw *q;
struct list_head *r;
int rc = -EINVAL;
BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
if (!sysctl_local_reserved_ports)
goto out;
rc = proto_register(&tcp_prot, 1);
if (rc)
goto out_free_reserved_ports;
rc = proto_register(&udp_prot, 1);
if (rc)
goto out_unregister_tcp_proto;
rc = proto_register(&raw_prot, 1);
if (rc)
goto out_unregister_udp_proto;
/*
* Tell SOCKET that we are alive...
*/
(void)sock_register(&inet_family_ops);
#ifdef CONFIG_SYSCTL
ip_static_sysctl_init();
#endif
/*
* Add all the base protocols.
*/
if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n");
if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n");
if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n");
#ifdef CONFIG_IP_MULTICAST
if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n");
#endif
/* Register the socket-side information for inet_create. */
for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r);
for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
inet_register_protosw(q);
/*
* Set the ARP module up
*/
arp_init();
/*
* Set the IP module up
*/
ip_init();
tcp_v4_init();
/* Setup TCP slab cache for open requests. */
tcp_init();
/* Setup UDP memory threshold */
udp_init();
/* Add UDP-Lite (RFC 3828) */
udplite4_register();
/*
* Set the ICMP layer up
*/
if (icmp_init() < 0)
panic("Failed to create the ICMP control socket.\n");
/*
* Initialise the multicast router
*/
#if defined(CONFIG_IP_MROUTE)
if (ip_mr_init())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n");
#endif
/*
* Initialise per-cpu ipv4 mibs
*/
if (init_ipv4_mibs())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n");
ipv4_proc_init();
ipfrag_init();
dev_add_pack(&ip_packet_type);
rc = 0;
out:
return rc;
out_unregister_udp_proto:
proto_unregister(&udp_prot);
out_unregister_tcp_proto:
proto_unregister(&tcp_prot);
out_free_reserved_ports:
kfree(sysctl_local_reserved_ports);
goto out;
}
fs_initcall(inet_init);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
int rc = 0;
if (raw_proc_init())
goto out_raw;
if (tcp4_proc_init())
goto out_tcp;
if (udp4_proc_init())
goto out_udp;
if (ip_misc_proc_init())
goto out_misc;
out:
return rc;
out_misc:
udp4_proc_exit();
out_udp:
tcp4_proc_exit();
out_tcp:
raw_proc_exit();
out_raw:
rc = -ENOMEM;
goto out;
}
#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
MODULE_ALIAS_NETPROTO(PF_INET);
|
121,540,044,540,405 | /*
* NET3: Implementation of the ICMP protocol layer.
*
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Some of the function names and the icmp unreach table for this
* module were derived from [icmp.c 1.0.11 06/02/93] by
* Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
* Other than that this module is a complete rewrite.
*
* Fixes:
* Clemens Fruhwirth : introduce global icmp rate limiting
* with icmp type masking ability instead
* of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit
* call.
* Alan Cox : Added 216,128 byte paths to the MTU
* code.
* Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a
* routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly
* (RFC 1812).
* Martin Mares : Now copying as much data from the
* original packet as we can without
* exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812).
* Andi Kleen : Check all packet lengths properly
* and moved all kfree_skb() up to
* icmp_rcv.
* Andi Kleen : Move the rate limit bookkeeping
* into the dest entry and use a token
* bucket filter (thanks to ANK). Make
* the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly
* - ICMP header length was not accounted
* at all.
* Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
*
* To Fix:
*
* - Should use skb_pull() instead of all the manual checking.
* This would also greatly simplify some upper layer error handlers. --AK
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
/*
* Build xmit assembly blocks
*/
struct icmp_bxm {
struct sk_buff *skb;
int offset;
int data_len;
struct {
struct icmphdr icmph;
__be32 times[3];
} data;
int head_len;
struct ip_options replyopts;
unsigned char optbuf[40];
};
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 states that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errors'. */
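/* Indexed by the ICMP_DEST_UNREACH code; transport protocols use this
* table to map a code to an errno, and 'fatal' marks the errors a
* connected socket should treat as hard failures rather than transient.
*/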
const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */
.fatal = 0,
},
{
.errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */,
.fatal = 1,
},
{
.errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */
.fatal = 1,
},
{
.errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */
.fatal = 0,
},
{
.errno = EOPNOTSUPP, /* ICMP_SR_FAILED */
.fatal = 0,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */
.fatal = 1,
},
{
.errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */
.fatal = 1,
},
{
.errno = ENONET, /* ICMP_HOST_ISOLATED */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_ANO */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_ANO */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */
.fatal = 1,
},
};
EXPORT_SYMBOL(icmp_err_convert);
/*
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control {
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
/*
* The ICMP socket(s). This is the most convenient way to flow control
* our ICMP output as well as maintain a clean interface throughout
* all layers. All Socketless IP sends will soon be gone.
*
* On SMP we have one ICMP socket per-cpu.
*/
static struct sock *icmp_sk(struct net *net)
{
return net->ipv4.icmp_sk[smp_processor_id()];
}
static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;
local_bh_disable();
sk = icmp_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
local_bh_enable();
return NULL;
}
return sk;
}
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock_bh(&sk->sk_lock.slock);
}
/*
* Send an ICMP frame.
*/
static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
int type, int code)
{
struct dst_entry *dst = &rt->dst;
bool rc = true;
if (type > NR_ICMP_TYPES)
goto out;
/* Don't limit PMTU discovery. */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
goto out;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;
/* Limit if icmp type is enabled in ratemask. */
if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
if (!rt->peer)
rt_bind_peer(rt, 1);
rc = inet_peer_xrlim_allow(rt->peer,
net->ipv4.sysctl_icmp_ratelimit);
}
out:
return rc;
}
/*
* Maintain the counters used in the SNMP statistics for outgoing ICMP
*/
void icmp_out_count(struct net *net, unsigned char type)
{
ICMPMSGOUT_INC_STATS(net, type);
ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
/*
* Checksum each fragment; on the first one, include the headers and
* the final checksum.
*/
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
__wsum csum;
csum = skb_copy_and_csum_bits(icmp_param->skb,
icmp_param->offset + offset,
to, len, 0);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (icmp_pointers[icmp_param->data.icmph.type].error)
nf_ct_attach(skb, icmp_param->skb);
return 0;
}
static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct ipcm_cookie *ipc, struct rtable **rt)
{
struct sock *sk;
struct sk_buff *skb;
sk = icmp_sk(dev_net((*rt)->dst.dev));
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT) < 0) {
ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
struct sk_buff *skb1;
skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len, csum);
icmph->checksum = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
}
/*
* Driving logic for building and sending ICMP messages.
*/
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct sock *sk;
struct inet_sock *inet;
__be32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb))
return;
sk = icmp_xmit_lock(net);
if (sk == NULL)
return;
inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (icmp_param->replyopts.optlen) {
ipc.opt = &icmp_param->replyopts;
if (ipc.opt->srr)
daddr = icmp_param->replyopts.faddr;
}
{
struct flowi4 fl4 = {
.daddr = daddr,
.saddr = rt->rt_spec_dst,
.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
.flowi4_proto = IPPROTO_ICMP,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
}
if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
icmp_param->data.icmph.code))
icmp_push_reply(icmp_param, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
}
static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
int type, int code,
struct icmp_bxm *param)
{
struct flowi4 fl4 = {
.daddr = (param->replyopts.srr ?
param->replyopts.faddr : iph->saddr),
.saddr = saddr,
.flowi4_tos = RT_TOS(tos),
.flowi4_proto = IPPROTO_ICMP,
.fl4_icmp_type = type,
.fl4_icmp_code = code,
};
struct rtable *rt, *rt2;
int err;
security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
if (!fl4.saddr)
fl4.saddr = rt->rt_src;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(&fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
/*
* Send an ICMP message in response to a situation
*
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address.
* MUST reply to only the first fragment.
*/
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct iphdr *iph;
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
struct ipcm_cookie ipc;
__be32 saddr;
u8 tos;
struct net *net;
struct sock *sk;
if (!rt)
goto out;
net = dev_net(rt->dst.dev);
/*
* Find the original header. It is expected to be valid, of course,
* but check it anyway; icmp_send is sometimes called from the most
* obscure devices.
*/
iph = ip_hdr(skb_in);
if ((u8 *)iph < skb_in->head ||
(skb_in->network_header + sizeof(*iph)) > skb_in->tail)
goto out;
/*
* No replies to physical multicast/broadcast
*/
if (skb_in->pkt_type != PACKET_HOST)
goto out;
/*
* Now check at the protocol level
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto out;
/*
* Only reply to fragment 0. We byte re-order the constant
* mask for efficiency.
*/
if (iph->frag_off & htons(IP_OFFSET))
goto out;
/*
* If we send an ICMP error in response to an ICMP error, a mess would result.
*/
if (icmp_pointers[type].error) {
/*
* We are an error, check if we are replying to an
* ICMP error
*/
if (iph->protocol == IPPROTO_ICMP) {
u8 _inner_type, *itp;
itp = skb_header_pointer(skb_in,
skb_network_header(skb_in) +
(iph->ihl << 2) +
offsetof(struct icmphdr,
type) -
skb_in->data,
sizeof(_inner_type),
&_inner_type);
if (itp == NULL)
goto out;
/*
* Assume any unknown ICMP type is an error. This
* isn't specified by the RFC, but think about it..
*/
if (*itp > NR_ICMP_TYPES ||
icmp_pointers[*itp].error)
goto out;
}
}
sk = icmp_xmit_lock(net);
if (sk == NULL)
return;
/*
* Construct source address and options.
*/
saddr = iph->daddr;
if (!(rt->rt_flags & RTCF_LOCAL)) {
struct net_device *dev = NULL;
rcu_read_lock();
if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
dev = dev_get_by_index_rcu(net, rt->rt_iif);
if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
}
tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
if (ip_options_echo(&icmp_param.replyopts, skb_in))
goto out_unlock;
/*
* Prepare data for ICMP header.
*/
icmp_param.data.icmph.type = type;
icmp_param.data.icmph.code = code;
icmp_param.data.icmph.un.gateway = info;
icmp_param.data.icmph.checksum = 0;
icmp_param.skb = skb_in;
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts;
ipc.tx_flags = 0;
rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
type, code, &icmp_param);
if (IS_ERR(rt))
goto out_unlock;
if (!icmpv4_xrlim_allow(net, rt, type, code))
goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */
room = dst_mtu(&rt->dst);
if (room > 576)
room = 576;
room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
room -= sizeof(struct icmphdr);
icmp_param.data_len = skb_in->len - icmp_param.offset;
if (icmp_param.data_len > room)
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_push_reply(&icmp_param, &ipc, &rt);
ende:
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
out:;
}
EXPORT_SYMBOL(icmp_send);
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_SOURCE_QUENCH and ICMP_PARAMETERPROB.
*/
static void icmp_unreach(struct sk_buff *skb)
{
const struct iphdr *iph;
struct icmphdr *icmph;
int hash, protocol;
const struct net_protocol *ipprot;
u32 info = 0;
struct net *net;
net = dev_net(skb_dst(skb)->dev);
/*
* Incomplete header?
* This only checks the IP header; upper levels should perform an
* additional check for longer headers.
*/
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out_err;
icmph = icmp_hdr(skb);
iph = (const struct iphdr *)skb->data;
if (iph->ihl < 5) /* Mangled header, drop. */
goto out_err;
if (icmph->type == ICMP_DEST_UNREACH) {
switch (icmph->code & 15) {
case ICMP_NET_UNREACH:
case ICMP_HOST_UNREACH:
case ICMP_PROT_UNREACH:
case ICMP_PORT_UNREACH:
break;
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
&iph->daddr);
} else {
info = ip_rt_frag_needed(net, iph,
ntohs(icmph->un.frag.mtu),
skb->dev);
if (!info)
goto out;
}
break;
case ICMP_SR_FAILED:
LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
&iph->daddr);
break;
default:
break;
}
if (icmph->code > NR_ICMP_UNREACH)
goto out;
} else if (icmph->type == ICMP_PARAMETERPROB)
info = ntohl(icmph->un.gateway) >> 24;
/*
* Throw it at our lower layers
*
* RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
* header.
* RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
* transport layer.
* RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
* transport layer.
*/
/*
* Check the other end isn't violating RFC 1122. Some routers send
* bogus responses to broadcast frames. If you see this message,
* first check that your netmask matches at both ends; if it does,
* then get the other vendor to fix their kit.
*/
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
if (net_ratelimit())
printk(KERN_WARNING "%pI4 sent an invalid ICMP "
"type %u, code %u "
"error to a broadcast: %pI4 on %s\n",
&ip_hdr(skb)->saddr,
icmph->type, icmph->code,
&iph->daddr,
skb->dev->name);
goto out;
}
/* Check in the full IP header plus 8 bytes of protocol to
* avoid additional coding at protocol handlers.
*/
if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
goto out;
iph = (const struct iphdr *)skb->data;
protocol = iph->protocol;
/*
* Deliver ICMP message to raw sockets. Pretty useless feature?
*/
raw_icmp_error(skb, protocol, info);
hash = protocol & (MAX_INET_PROTOS - 1);
rcu_read_lock();
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, info);
rcu_read_unlock();
out:
return;
out_err:
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_REDIRECT.
*/
static void icmp_redirect(struct sk_buff *skb)
{
const struct iphdr *iph;
if (skb->len < sizeof(struct iphdr))
goto out_err;
/*
* Get the copied header of the packet that caused the redirect
*/
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
iph = (const struct iphdr *)skb->data;
switch (icmp_hdr(skb)->code & 7) {
case ICMP_REDIR_NET:
case ICMP_REDIR_NETTOS:
/*
* As per RFC recommendations, now handle it as a host redirect.
*/
case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS:
ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
icmp_hdr(skb)->un.gateway,
iph->saddr, skb->dev);
break;
}
out:
return;
out_err:
ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
* requests.
* RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
* included in the reply.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
* echo requests, MUST have default=NOT.
* See also WRT handling of options once they are done and working.
*/
static void icmp_echo(struct sk_buff *skb)
{
struct net *net;
net = dev_net(skb_dst(skb)->dev);
if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
struct icmp_bxm icmp_param;
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
}
}
/*
* Handle ICMP Timestamp requests.
* RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
* SHOULD be in the kernel for minimum random latency.
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
static void icmp_timestamp(struct sk_buff *skb)
{
struct timespec tv;
struct icmp_bxm icmp_param;
/*
* Too short.
*/
if (skb->len < 4)
goto out_err;
/*
* Fill in the current time as ms since midnight UT:
*/
getnstimeofday(&tv);
icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
tv.tv_nsec / NSEC_PER_MSEC);
icmp_param.data.times[2] = icmp_param.data.times[1];
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG();
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code = 0;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = 0;
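/* The reply header is the 8-byte icmphdr plus the three 32-bit
* timestamps (originate, receive, transmit): 12 extra bytes.
*/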
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
out:
return;
out_err:
ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_ADDRESS_MASK requests. (RFC950)
*
* RFC1122 (3.2.2.9). A host MUST only send replies to
* ADDRESS_MASK requests if it's been configured as an address mask
* agent. Receiving a request doesn't constitute implicit permission to
* act as one. Of course, implementing this correctly requires (SHOULD)
* a way to turn the functionality on and off. Another one for sysctl(),
* I guess. -- MS
*
* RFC1812 (4.3.3.9). A router MUST implement it.
* A router SHOULD have a switch to turn it on/off.
* This switch MUST be ON by default.
*
* Gratuitous replies and zero-source replies are not implemented;
* this complies with the RFC. DO NOT implement them!!! The whole idea
* of broadcast addrmask replies as specified in RFC950 is broken.
* The problem is that it is not uncommon to have several prefixes
* on one physical interface. Moreover, the addrmask agent may not
* even be aware of the other existing prefixes.
* If the source is zero, the addrmask agent cannot choose the
* correct prefix. Gratuitous mask announcements suffer from the
* same problem. RFC1812 explains this, but still allows the use of
* ADDRMASK, which is pretty silly. --ANK
*
* All these rules are so bizarre that I removed kernel addrmask
* support entirely. It is wrong, it is obsolete, and nobody uses it
* in any case. --ANK
*
* Furthermore you can do it with a usermode address agent program
* anyway...
*/
static void icmp_address(struct sk_buff *skb)
{
#if 0
if (net_ratelimit())
printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
#endif
}
/*
* RFC1812 (4.3.3.9). A router SHOULD listen to all replies, and complain
* loudly if an inconsistency is found.
* called with rcu_read_lock()
*/
static void icmp_address_reply(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
return;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
if (in_dev->ifa_list &&
IN_DEV_LOG_MARTIANS(in_dev) &&
IN_DEV_FORWARD(in_dev)) {
__be32 _mask, *mp;
mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
BUG_ON(mp == NULL);
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (*mp == ifa->ifa_mask &&
inet_ifa_match(rt->rt_src, ifa))
break;
}
if (!ifa && net_ratelimit()) {
printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
mp, dev->name, &rt->rt_src);
}
}
}
static void icmp_discard(struct sk_buff *skb)
{
}
/*
* Deal with incoming ICMP packets.
*/
int icmp_rcv(struct sk_buff *skb)
{
struct icmphdr *icmph;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
int nh;
if (!(sp && sp->xvec[sp->len - 1]->props.flags &
XFRM_STATE_ICMP))
goto drop;
if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
goto drop;
nh = skb_network_offset(skb);
skb_set_network_header(skb, sizeof(*icmph));
if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
goto drop;
skb_set_network_header(skb, nh);
}
ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
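/* Hardware already summed the packet; a folded checksum of zero
* means it verified. */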
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto error;
}
if (!pskb_pull(skb, sizeof(*icmph)))
goto error;
icmph = icmp_hdr(skb);
ICMPMSGIN_INC_STATS_BH(net, icmph->type);
/*
* 18 is the highest 'known' ICMP type. Anything else is a mystery
*
* RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES)
goto error;
/*
* Parse the ICMP message
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
/*
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
* silently ignored (we let user decide with a sysctl).
* RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
* discarded if to broadcast/multicast.
*/
if ((icmph->type == ICMP_ECHO ||
icmph->type == ICMP_TIMESTAMP) &&
net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
goto error;
}
if (icmph->type != ICMP_ECHO &&
icmph->type != ICMP_TIMESTAMP &&
icmph->type != ICMP_ADDRESS &&
icmph->type != ICMP_ADDRESSREPLY) {
goto error;
}
}
icmp_pointers[icmph->type].handler(skb);
drop:
kfree_skb(skb);
return 0;
error:
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
goto drop;
}
/*
* This table is the definition of how we handle ICMP.
*/
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
.handler = icmp_discard,
},
[1] = {
.handler = icmp_discard,
.error = 1,
},
[2] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
.handler = icmp_redirect,
.error = 1,
},
[6] = {
.handler = icmp_discard,
.error = 1,
},
[7] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
.handler = icmp_echo,
},
[9] = {
.handler = icmp_discard,
.error = 1,
},
[10] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
.handler = icmp_address,
},
[ICMP_ADDRESSREPLY] = {
.handler = icmp_address_reply,
},
};
static void __net_exit icmp_sk_exit(struct net *net)
{
int i;
for_each_possible_cpu(i)
inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
kfree(net->ipv4.icmp_sk);
net->ipv4.icmp_sk = NULL;
}
static int __net_init icmp_sk_init(struct net *net)
{
int i, err;
net->ipv4.icmp_sk =
kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
if (net->ipv4.icmp_sk == NULL)
return -ENOMEM;
for_each_possible_cpu(i) {
struct sock *sk;
err = inet_ctl_sock_create(&sk, PF_INET,
SOCK_RAW, IPPROTO_ICMP, net);
if (err < 0)
goto fail;
net->ipv4.icmp_sk[i] = sk;
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
sk->sk_sndbuf =
(2 * ((64 * 1024) + sizeof(struct sk_buff)));
/*
* Speed up sock_wfree()
*/
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
}
/* Control parameters for ECHO replies. */
net->ipv4.sysctl_icmp_echo_ignore_all = 0;
net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
/* Control parameter - ignore bogus broadcast responses? */
net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
/*
* Configurable global rate limit.
*
* ratelimit defines the tokens/packet consumed for the dst->rate_token
* bucket; ratemask defines which ICMP types are rate-limited by
* setting its bit position.
*
* default:
* dest unreachable (3), source quench (4),
* time exceeded (11), parameter problem (12)
*/
net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
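/* 0x1818 = (1 << 3) | (1 << 4) | (1 << 11) | (1 << 12), i.e. the
* default types listed above: ICMP_DEST_UNREACH, ICMP_SOURCE_QUENCH,
* ICMP_TIME_EXCEEDED and ICMP_PARAMETERPROB.
*/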
net->ipv4.sysctl_icmp_ratemask = 0x1818;
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
return 0;
fail:
for_each_possible_cpu(i)
inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
kfree(net->ipv4.icmp_sk);
return err;
}
static struct pernet_operations __net_initdata icmp_sk_ops = {
.init = icmp_sk_init,
.exit = icmp_sk_exit,
};
int __init icmp_init(void)
{
return register_pernet_subsys(&icmp_sk_ops);
}
| /*
* NET3: Implementation of the ICMP protocol layer.
*
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Some of the function names and the icmp unreach table for this
* module were derived from [icmp.c 1.0.11 06/02/93] by
* Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
* Other than that this module is a complete rewrite.
*
* Fixes:
* Clemens Fruhwirth : introduce global icmp rate limiting
* with icmp type masking ability instead
* of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit
* call.
* Alan Cox : Added 216,128 byte paths to the MTU
* code.
* Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a
* routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly
* (RFC 1812).
* Martin Mares : Now copying as much data from the
* original packet as we can without
* exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812).
* Andi Kleen : Check all packet lengths properly
* and moved all kfree_skb() up to
* icmp_rcv.
* Andi Kleen : Move the rate limit bookkeeping
* into the dest entry and use a token
* bucket filter (thanks to ANK). Make
* the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly
* - ICMP header length was not accounted
* at all.
* Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
*
* To Fix:
*
* - Should use skb_pull() instead of all the manual checking.
* This would also greatly simplify some upper layer error handlers. --AK
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
/*
* Build xmit assembly blocks
*/
struct icmp_bxm {
struct sk_buff *skb;
int offset;
int data_len;
struct {
struct icmphdr icmph;
__be32 times[3];
} data;
int head_len;
struct ip_options_data replyopts;
};
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 states that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errors'. */
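/* Indexed by the ICMP_DEST_UNREACH code; transport protocols use this
* table to map a code to an errno, and 'fatal' marks the errors a
* connected socket should treat as hard failures rather than transient.
*/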
const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */
.fatal = 0,
},
{
.errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */,
.fatal = 1,
},
{
.errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */
.fatal = 1,
},
{
.errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */
.fatal = 0,
},
{
.errno = EOPNOTSUPP, /* ICMP_SR_FAILED */
.fatal = 0,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */
.fatal = 1,
},
{
.errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */
.fatal = 1,
},
{
.errno = ENONET, /* ICMP_HOST_ISOLATED */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_ANO */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_ANO */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */
.fatal = 1,
},
};
EXPORT_SYMBOL(icmp_err_convert);
/*
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control {
void (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
/*
* The ICMP socket(s). This is the most convenient way to flow control
* our ICMP output as well as maintain a clean interface throughout
* all layers. All Socketless IP sends will soon be gone.
*
* On SMP we have one ICMP socket per-cpu.
*/
static struct sock *icmp_sk(struct net *net)
{
return net->ipv4.icmp_sk[smp_processor_id()];
}
static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;
local_bh_disable();
sk = icmp_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
local_bh_enable();
return NULL;
}
return sk;
}
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock_bh(&sk->sk_lock.slock);
}
/*
* Send an ICMP frame.
*/
static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
int type, int code)
{
struct dst_entry *dst = &rt->dst;
bool rc = true;
if (type > NR_ICMP_TYPES)
goto out;
/* Don't limit PMTU discovery. */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
goto out;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;
/* Limit if icmp type is enabled in ratemask. */
if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
if (!rt->peer)
rt_bind_peer(rt, 1);
rc = inet_peer_xrlim_allow(rt->peer,
net->ipv4.sysctl_icmp_ratelimit);
}
out:
return rc;
}
/*
* Maintain the counters used in the SNMP statistics for outgoing ICMP
*/
void icmp_out_count(struct net *net, unsigned char type)
{
ICMPMSGOUT_INC_STATS(net, type);
ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
/*
* Checksum each fragment; on the first one, include the headers and
* the final checksum.
*/
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
__wsum csum;
csum = skb_copy_and_csum_bits(icmp_param->skb,
icmp_param->offset + offset,
to, len, 0);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (icmp_pointers[icmp_param->data.icmph.type].error)
nf_ct_attach(skb, icmp_param->skb);
return 0;
}
static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct ipcm_cookie *ipc, struct rtable **rt)
{
struct sock *sk;
struct sk_buff *skb;
sk = icmp_sk(dev_net((*rt)->dst.dev));
if (ip_append_data(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT) < 0) {
ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
struct sk_buff *skb1;
skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len, csum);
icmph->checksum = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
}
/*
* Driving logic for building and sending ICMP messages.
*/
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct sock *sk;
struct inet_sock *inet;
__be32 daddr;
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
return;
sk = icmp_xmit_lock(net);
if (sk == NULL)
return;
inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (icmp_param->replyopts.opt.opt.optlen) {
ipc.opt = &icmp_param->replyopts.opt;
if (ipc.opt->opt.srr)
daddr = icmp_param->replyopts.opt.opt.faddr;
}
{
struct flowi4 fl4 = {
.daddr = daddr,
.saddr = rt->rt_spec_dst,
.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
.flowi4_proto = IPPROTO_ICMP,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
}
if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
icmp_param->data.icmph.code))
icmp_push_reply(icmp_param, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
}
static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos,
int type, int code,
struct icmp_bxm *param)
{
struct flowi4 fl4 = {
.daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr),
.saddr = saddr,
.flowi4_tos = RT_TOS(tos),
.flowi4_proto = IPPROTO_ICMP,
.fl4_icmp_type = type,
.fl4_icmp_code = code,
};
struct rtable *rt, *rt2;
int err;
security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
if (!fl4.saddr)
fl4.saddr = rt->rt_src;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(&fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
/*
* Send an ICMP message in response to a situation
*
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address.
* MUST reply to only the first fragment.
*/
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct iphdr *iph;
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
struct ipcm_cookie ipc;
__be32 saddr;
u8 tos;
struct net *net;
struct sock *sk;
if (!rt)
goto out;
net = dev_net(rt->dst.dev);
/*
* Find the original header. It is expected to be valid, of course,
* but check it anyway; icmp_send is sometimes called from the most
* obscure devices.
*/
iph = ip_hdr(skb_in);
if ((u8 *)iph < skb_in->head ||
(skb_in->network_header + sizeof(*iph)) > skb_in->tail)
goto out;
/*
* No replies to physical multicast/broadcast
*/
if (skb_in->pkt_type != PACKET_HOST)
goto out;
/*
* Now check at the protocol level
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto out;
/*
* Only reply to fragment 0. We byte re-order the constant
* mask for efficiency.
*/
if (iph->frag_off & htons(IP_OFFSET))
goto out;
/*
* If we send an ICMP error in response to an ICMP error, a mess would result.
*/
if (icmp_pointers[type].error) {
/*
* We are an error, check if we are replying to an
* ICMP error
*/
if (iph->protocol == IPPROTO_ICMP) {
u8 _inner_type, *itp;
itp = skb_header_pointer(skb_in,
skb_network_header(skb_in) +
(iph->ihl << 2) +
offsetof(struct icmphdr,
type) -
skb_in->data,
sizeof(_inner_type),
&_inner_type);
if (itp == NULL)
goto out;
/*
* Assume any unknown ICMP type is an error. This
* isn't specified by the RFC, but think about it..
*/
if (*itp > NR_ICMP_TYPES ||
icmp_pointers[*itp].error)
goto out;
}
}
sk = icmp_xmit_lock(net);
if (sk == NULL)
return;
/*
* Construct source address and options.
*/
saddr = iph->daddr;
if (!(rt->rt_flags & RTCF_LOCAL)) {
struct net_device *dev = NULL;
rcu_read_lock();
if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
dev = dev_get_by_index_rcu(net, rt->rt_iif);
if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
}
tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
goto out_unlock;
/*
* Prepare data for ICMP header.
*/
icmp_param.data.icmph.type = type;
icmp_param.data.icmph.code = code;
icmp_param.data.icmph.un.gateway = info;
icmp_param.data.icmph.checksum = 0;
icmp_param.skb = skb_in;
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts.opt;
ipc.tx_flags = 0;
rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
type, code, &icmp_param);
if (IS_ERR(rt))
goto out_unlock;
if (!icmpv4_xrlim_allow(net, rt, type, code))
goto ende;
/* RFC says return as much as we can without exceeding 576 bytes. */
room = dst_mtu(&rt->dst);
if (room > 576)
room = 576;
room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
room -= sizeof(struct icmphdr);
icmp_param.data_len = skb_in->len - icmp_param.offset;
if (icmp_param.data_len > room)
icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_push_reply(&icmp_param, &ipc, &rt);
ende:
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
out:;
}
EXPORT_SYMBOL(icmp_send);
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_SOURCE_QUENCH and ICMP_PARAMETERPROB.
*/
static void icmp_unreach(struct sk_buff *skb)
{
const struct iphdr *iph;
struct icmphdr *icmph;
int hash, protocol;
const struct net_protocol *ipprot;
u32 info = 0;
struct net *net;
net = dev_net(skb_dst(skb)->dev);
/*
* Incomplete header?
* This only checks the IP header; upper levels should perform an
* additional check for longer headers.
*/
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out_err;
icmph = icmp_hdr(skb);
iph = (const struct iphdr *)skb->data;
if (iph->ihl < 5) /* Mangled header, drop. */
goto out_err;
if (icmph->type == ICMP_DEST_UNREACH) {
switch (icmph->code & 15) {
case ICMP_NET_UNREACH:
case ICMP_HOST_UNREACH:
case ICMP_PROT_UNREACH:
case ICMP_PORT_UNREACH:
break;
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n",
&iph->daddr);
} else {
info = ip_rt_frag_needed(net, iph,
ntohs(icmph->un.frag.mtu),
skb->dev);
if (!info)
goto out;
}
break;
case ICMP_SR_FAILED:
LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n",
&iph->daddr);
break;
default:
break;
}
if (icmph->code > NR_ICMP_UNREACH)
goto out;
} else if (icmph->type == ICMP_PARAMETERPROB)
info = ntohl(icmph->un.gateway) >> 24;
/*
* Throw it at our lower layers
*
* RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
* header.
* RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
* transport layer.
* RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
* transport layer.
*/
/*
* Check the other end isn't violating RFC 1122. Some routers send
* bogus responses to broadcast frames. If you see this message,
* first check that your netmask matches at both ends; if it does,
* then get the other vendor to fix their kit.
*/
if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
if (net_ratelimit())
printk(KERN_WARNING "%pI4 sent an invalid ICMP "
"type %u, code %u "
"error to a broadcast: %pI4 on %s\n",
&ip_hdr(skb)->saddr,
icmph->type, icmph->code,
&iph->daddr,
skb->dev->name);
goto out;
}
/* Check in the full IP header plus 8 bytes of protocol to
* avoid additional coding at protocol handlers.
*/
if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
goto out;
iph = (const struct iphdr *)skb->data;
protocol = iph->protocol;
/*
* Deliver ICMP message to raw sockets. Pretty useless feature?
*/
raw_icmp_error(skb, protocol, info);
hash = protocol & (MAX_INET_PROTOS - 1);
rcu_read_lock();
ipprot = rcu_dereference(inet_protos[hash]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, info);
rcu_read_unlock();
out:
return;
out_err:
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_REDIRECT.
*/
static void icmp_redirect(struct sk_buff *skb)
{
const struct iphdr *iph;
if (skb->len < sizeof(struct iphdr))
goto out_err;
/*
* Get the copied header of the packet that caused the redirect
*/
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
iph = (const struct iphdr *)skb->data;
switch (icmp_hdr(skb)->code & 7) {
case ICMP_REDIR_NET:
case ICMP_REDIR_NETTOS:
/*
* As per RFC recommendations, now handle it as a host redirect.
*/
case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS:
ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
icmp_hdr(skb)->un.gateway,
iph->saddr, skb->dev);
break;
}
out:
return;
out_err:
ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
* requests.
* RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
* included in the reply.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
* echo requests, MUST have default=NOT.
* See also WRT handling of options once they are done and working.
*/
static void icmp_echo(struct sk_buff *skb)
{
struct net *net;
net = dev_net(skb_dst(skb)->dev);
if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
struct icmp_bxm icmp_param;
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
}
}
/*
* Handle ICMP Timestamp requests.
* RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
* SHOULD be in the kernel for minimum random latency.
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
static void icmp_timestamp(struct sk_buff *skb)
{
struct timespec tv;
struct icmp_bxm icmp_param;
/*
* Too short.
*/
if (skb->len < 4)
goto out_err;
/*
* Fill in the current time as ms since midnight UT:
*/
getnstimeofday(&tv);
icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
tv.tv_nsec / NSEC_PER_MSEC);
icmp_param.data.times[2] = icmp_param.data.times[1];
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG();
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code = 0;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = 0;
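/* The reply header is the 8-byte icmphdr plus the three 32-bit
* timestamps (originate, receive, transmit): 12 extra bytes.
*/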
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
out:
return;
out_err:
ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
goto out;
}
/*
* Handle ICMP_ADDRESS_MASK requests. (RFC950)
*
* RFC1122 (3.2.2.9). A host MUST only send replies to
* ADDRESS_MASK requests if it's been configured as an address mask
* agent. Receiving a request doesn't constitute implicit permission to
* act as one. Of course, implementing this correctly requires (SHOULD)
* a way to turn the functionality on and off. Another one for sysctl(),
* I guess. -- MS
*
* RFC1812 (4.3.3.9). A router MUST implement it.
* A router SHOULD have a switch to turn it on/off.
* This switch MUST be ON by default.
*
* Gratuitous replies and zero-source replies are not implemented;
* this complies with the RFC. DO NOT implement them!!! The whole idea
* of broadcast addrmask replies as specified in RFC950 is broken.
* The problem is that it is not uncommon to have several prefixes
* on one physical interface. Moreover, the addrmask agent may not
* even be aware of the other existing prefixes.
* If the source is zero, the addrmask agent cannot choose the
* correct prefix. Gratuitous mask announcements suffer from the
* same problem. RFC1812 explains this, but still allows the use of
* ADDRMASK, which is pretty silly. --ANK
*
* All these rules are so bizarre that I removed kernel addrmask
* support entirely. It is wrong, it is obsolete, and nobody uses it
* in any case. --ANK
*
* Furthermore you can do it with a usermode address agent program
* anyway...
*/
static void icmp_address(struct sk_buff *skb)
{
#if 0
if (net_ratelimit())
printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
#endif
}
/*
* RFC1812 (4.3.3.9). A router SHOULD listen to all replies, and complain
* loudly if an inconsistency is found.
* called with rcu_read_lock()
*/
static void icmp_address_reply(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
return;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
if (in_dev->ifa_list &&
IN_DEV_LOG_MARTIANS(in_dev) &&
IN_DEV_FORWARD(in_dev)) {
__be32 _mask, *mp;
mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
BUG_ON(mp == NULL);
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (*mp == ifa->ifa_mask &&
inet_ifa_match(rt->rt_src, ifa))
break;
}
if (!ifa && net_ratelimit()) {
printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
mp, dev->name, &rt->rt_src);
}
}
}
static void icmp_discard(struct sk_buff *skb)
{
}
/*
* Deal with incoming ICMP packets.
*/
int icmp_rcv(struct sk_buff *skb)
{
struct icmphdr *icmph;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
int nh;
if (!(sp && sp->xvec[sp->len - 1]->props.flags &
XFRM_STATE_ICMP))
goto drop;
if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
goto drop;
nh = skb_network_offset(skb);
skb_set_network_header(skb, sizeof(*icmph));
if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
goto drop;
skb_set_network_header(skb, nh);
}
ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
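/* Hardware already summed the packet; a folded checksum of zero
* means it verified. */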
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto error;
}
if (!pskb_pull(skb, sizeof(*icmph)))
goto error;
icmph = icmp_hdr(skb);
ICMPMSGIN_INC_STATS_BH(net, icmph->type);
/*
* 18 is the highest 'known' ICMP type. Anything else is a mystery
*
* RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
* discarded.
*/
if (icmph->type > NR_ICMP_TYPES)
goto error;
/*
* Parse the ICMP message
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
/*
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
* silently ignored (we let user decide with a sysctl).
* RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
* discarded if to broadcast/multicast.
*/
if ((icmph->type == ICMP_ECHO ||
icmph->type == ICMP_TIMESTAMP) &&
net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
goto error;
}
if (icmph->type != ICMP_ECHO &&
icmph->type != ICMP_TIMESTAMP &&
icmph->type != ICMP_ADDRESS &&
icmph->type != ICMP_ADDRESSREPLY) {
goto error;
}
}
icmp_pointers[icmph->type].handler(skb);
drop:
kfree_skb(skb);
return 0;
error:
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
goto drop;
}
/*
* This table is the definition of how we handle ICMP.
*/
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
[ICMP_ECHOREPLY] = {
.handler = icmp_discard,
},
[1] = {
.handler = icmp_discard,
.error = 1,
},
[2] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_DEST_UNREACH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_SOURCE_QUENCH] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_REDIRECT] = {
.handler = icmp_redirect,
.error = 1,
},
[6] = {
.handler = icmp_discard,
.error = 1,
},
[7] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_ECHO] = {
.handler = icmp_echo,
},
[9] = {
.handler = icmp_discard,
.error = 1,
},
[10] = {
.handler = icmp_discard,
.error = 1,
},
[ICMP_TIME_EXCEEDED] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_PARAMETERPROB] = {
.handler = icmp_unreach,
.error = 1,
},
[ICMP_TIMESTAMP] = {
.handler = icmp_timestamp,
},
[ICMP_TIMESTAMPREPLY] = {
.handler = icmp_discard,
},
[ICMP_INFO_REQUEST] = {
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
.handler = icmp_address,
},
[ICMP_ADDRESSREPLY] = {
.handler = icmp_address_reply,
},
};
static void __net_exit icmp_sk_exit(struct net *net)
{
int i;
for_each_possible_cpu(i)
inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
kfree(net->ipv4.icmp_sk);
net->ipv4.icmp_sk = NULL;
}
static int __net_init icmp_sk_init(struct net *net)
{
int i, err;
net->ipv4.icmp_sk =
kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
if (net->ipv4.icmp_sk == NULL)
return -ENOMEM;
for_each_possible_cpu(i) {
struct sock *sk;
err = inet_ctl_sock_create(&sk, PF_INET,
SOCK_RAW, IPPROTO_ICMP, net);
if (err < 0)
goto fail;
net->ipv4.icmp_sk[i] = sk;
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
sk->sk_sndbuf =
(2 * ((64 * 1024) + sizeof(struct sk_buff)));
/*
* Speed up sock_wfree()
*/
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
}
/* Control parameters for ECHO replies. */
net->ipv4.sysctl_icmp_echo_ignore_all = 0;
net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;
/* Control parameter - ignore bogus broadcast responses? */
net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;
/*
* Configurable global rate limit.
*
* ratelimit defines the tokens/packet consumed for the dst->rate_token
* bucket; ratemask defines which ICMP types are rate-limited by
* setting its bit position.
*
* default:
* dest unreachable (3), source quench (4),
* time exceeded (11), parameter problem (12)
*/
net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
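/* 0x1818 = (1 << 3) | (1 << 4) | (1 << 11) | (1 << 12), i.e. the
* default types listed above: ICMP_DEST_UNREACH, ICMP_SOURCE_QUENCH,
* ICMP_TIME_EXCEEDED and ICMP_PARAMETERPROB.
*/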
net->ipv4.sysctl_icmp_ratemask = 0x1818;
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;
return 0;
fail:
for_each_possible_cpu(i)
inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
kfree(net->ipv4.icmp_sk);
return err;
}
static struct pernet_operations __net_initdata icmp_sk_ops = {
.init = icmp_sk_init,
.exit = icmp_sk_exit,
};
int __init icmp_init(void)
{
return register_pernet_subsys(&icmp_sk_ops);
}
|
128,202,841,162,889 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Support for INET connection oriented protocols.
*
* Authors: See the TCP sources
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or(at your option) any later version.
*/
#include <linux/module.h>
#include <linux/jhash.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
/*
* This struct holds the first and last local port number.
*/
struct local_ports sysctl_local_ports __read_mostly = {
.lock = SEQLOCK_UNLOCKED,
.range = { 32768, 61000 },
};
unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(int *low, int *high)
{
unsigned seq;
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
*low = sysctl_local_ports.range[0];
*high = sysctl_local_ports.range[1];
} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
{
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
/*
* Unlike other sk lookup places we do not check
* for sk_net here, since _all_ the socks listed
* in the tb->owners list belong to the same net - the
* one this bucket belongs to.
*/
sk_for_each_bound(sk2, node, &tb->owners) {
if (sk != sk2 &&
!inet_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
break;
}
}
}
return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
/* Obtain a reference to a local port for the given sock,
* if snum is zero, select any available local port.
*/
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct inet_bind_bucket *tb;
int ret, attempts = 5;
struct net *net = sock_net(sk);
int smallest_size = -1, smallest_rover;
local_bh_disable();
if (!snum) {
int remaining, rover, low, high;
again:
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
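/* Pick a uniformly random starting port in [low, high]; the scan
* below walks the whole range from there, wrapping at 'high'.
*/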
smallest_rover = rover = net_random() % remaining + low;
smallest_size = -1;
do {
if (inet_is_reserved_local_port(rover))
goto next_nolock;
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == rover) {
if (tb->fastreuse > 0 &&
sk->sk_reuse &&
sk->sk_state != TCP_LISTEN &&
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
}
goto next;
}
break;
next:
spin_unlock(&head->lock);
next_nolock:
if (++rover > high)
rover = low;
} while (--remaining > 0);
/* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash
* locks if this test triggers, because if 'remaining'
* drops to zero, we broke out of the do/while loop at
* the top level, not from the 'break;' statement.
*/
ret = 1;
if (remaining <= 0) {
if (smallest_size != -1) {
snum = smallest_rover;
goto have_snum;
}
goto fail;
}
/* OK, here is the one we will use. HEAD is
* non-NULL and we hold its lock.
*/
snum = rover;
} else {
have_snum:
head = &hashinfo->bhash[inet_bhashfn(net, snum,
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == snum)
goto tb_found;
}
tb = NULL;
goto tb_not_found;
tb_found:
if (!hlist_empty(&tb->owners)) {
if (tb->fastreuse > 0 &&
sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size == -1) {
goto success;
} else {
ret = 1;
if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size != -1 && --attempts >= 0) {
spin_unlock(&head->lock);
goto again;
}
goto fail_unlock;
}
}
}
tb_not_found:
ret = 1;
if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
net, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
} else if (tb->fastreuse &&
(!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
ret = 0;
fail_unlock:
spin_unlock(&head->lock);
fail:
local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
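/*
 * Illustrative userspace sketch of the "rover" scan inet_csk_get_port()
 * performs when snum == 0: start at a random port in [low, high], walk
 * upwards, wrap around to low, and give up once every port has been visited.
 * is_reserved_demo() is a hypothetical stand-in for
 * inet_is_reserved_local_port(); the real code also checks the bind hash
 * buckets under their locks.
 */
#include <stdlib.h>

static int is_reserved_demo(int port)
{
	return 0;			/* nothing reserved in this sketch */
}

static int pick_local_port(int low, int high)
{
	int remaining = high - low + 1;
	int rover = rand() % remaining + low;

	do {
		if (!is_reserved_demo(rover))
			return rover;
		if (++rover > high)
			rover = low;
	} while (--remaining > 0);

	return -1;			/* local port range exhausted */
}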
/*
* Wait for an incoming connection, avoid race conditions. This must be called
* with the socket locked.
*/
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
struct inet_connection_sock *icsk = inet_csk(sk);
DEFINE_WAIT(wait);
int err;
/*
* True wake-one mechanism for incoming connections: only
* one process gets woken up, not the 'whole herd'.
* Since we do not 'race & poll' for established sockets
* anymore, the common case will execute the loop only once.
*
* Subtle issue: "add_wait_queue_exclusive()" will be added
* after any current non-exclusive waiters, and we know that
* it will always _stay_ after any new non-exclusive waiters
* because all non-exclusive waiters are added at the
* beginning of the wait-queue. As such, it's ok to "drop"
* our exclusiveness temporarily when we get woken up without
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
timeo = schedule_timeout(timeo);
lock_sock(sk);
err = 0;
if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
break;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = -EAGAIN;
if (!timeo)
break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
/*
* This will accept the next outstanding connection.
*/
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *newsk;
int error;
lock_sock(sk);
/* We need to make sure that this socket is listening,
* and that it has something pending.
*/
error = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out_err;
/* Find already established connection */
if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* If this is a non blocking socket don't sleep */
error = -EAGAIN;
if (!timeo)
goto out_err;
error = inet_csk_wait_for_connect(sk, timeo);
if (error)
goto out_err;
}
newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
release_sock(sk);
return newsk;
out_err:
newsk = NULL;
*err = error;
goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
/*
* Using different timers for retransmit, delayed acks and probes.
* We may wish to use just one timer, maintaining a list of expiry
* jiffies, to optimize.
*/
void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(unsigned long),
void (*delack_handler)(unsigned long),
void (*keepalive_handler)(unsigned long))
{
struct inet_connection_sock *icsk = inet_csk(sk);
setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
(unsigned long)sk);
setup_timer(&icsk->icsk_delack_timer, delack_handler,
(unsigned long)sk);
setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
void inet_csk_clear_xmit_timers(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
struct dst_entry *inet_csk_route_req(struct sock *sk,
const struct request_sock *req)
{
struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options *opt = inet_rsk(req)->opt;
struct net *net = sock_net(sk);
struct flowi4 fl4;
flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt))
goto no_route;
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto route_err;
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
const u32 rnd, const u32 synq_hsize)
{
return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
struct request_sock *inet_csk_search_req(const struct sock *sk,
struct request_sock ***prevp,
const __be16 rport, const __be32 raddr,
const __be32 laddr)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
struct request_sock *req, **prev;
for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
lopt->nr_table_entries)];
(req = *prev) != NULL;
prev = &req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req);
if (ireq->rmt_port == rport &&
ireq->rmt_addr == raddr &&
ireq->loc_addr == laddr &&
AF_INET_FAMILY(req->rsk_ops->family)) {
WARN_ON(req->sk);
*prevp = prev;
break;
}
}
return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
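/*
 * Illustrative sketch of the pointer-to-pointer chain walk used by
 * inet_csk_search_req() above: by returning the address of the link that
 * points at the match, the caller can later unlink the entry in O(1) without
 * re-walking the chain.  'struct node' is a hypothetical stand-in for
 * struct request_sock.
 */
struct node {
	int key;
	struct node *dl_next;
};

static struct node *search_demo(struct node **head, int key,
				struct node ***prevp)
{
	struct node *n, **prev;

	for (prev = head; (n = *prev) != NULL; prev = &n->dl_next) {
		if (n->key == key) {
			*prevp = prev;	/* unlink later via: **prevp = n->dl_next */
			break;
		}
	}
	return n;
}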
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
lopt->hash_rnd, lopt->nr_table_entries);
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* The only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
const int max_retries,
const u8 rskq_defer_accept,
int *expire, int *resend)
{
if (!rskq_defer_accept) {
*expire = req->retrans >= thresh;
*resend = 1;
return;
}
*expire = req->retrans >= thresh &&
(!inet_rsk(req)->acked || req->retrans >= max_retries);
/*
* Do not resend while waiting for data after ACK;
* start resending at the end of the deferring period, to give
* a last chance for data or an ACK to create an established socket.
*/
*resend = !inet_rsk(req)->acked ||
req->retrans >= rskq_defer_accept - 1;
}
void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long interval,
const unsigned long timeout,
const unsigned long max_rto)
{
struct inet_connection_sock *icsk = inet_csk(parent);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct listen_sock *lopt = queue->listen_opt;
int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries;
unsigned long now = jiffies;
struct request_sock **reqp, *req;
int i, budget;
if (lopt == NULL || lopt->qlen == 0)
return;
/* Normally all the openreqs are young and become mature
* (i.e. are converted to an established socket) within the first timeout.
* If the synack was not acknowledged for 3 seconds, it means
* one of the following things: the synack was lost, the ack was lost,
* rtt is high or nobody planned to ack (i.e. synflood).
* When the server is a bit loaded, the queue is populated with old
* open requests, reducing the effective size of the queue.
* When the server is heavily loaded, the queue size reduces to zero
* after several minutes of work. It is not synflood,
* it is normal operation. The solution is pruning
* entries that are too old, overriding the normal timeout, when
* the situation becomes dangerous.
*
* Essentially, we reserve half of the room for young
* embryos, and abort the old ones without pity if they
* are about to clog our table.
*/
if (lopt->qlen>>(lopt->max_qlen_log-1)) {
int young = (lopt->qlen_young<<1);
while (thresh > 2) {
if (lopt->qlen < young)
break;
thresh--;
young <<= 1;
}
}
if (queue->rskq_defer_accept)
max_retries = queue->rskq_defer_accept;
budget = 2 * (lopt->nr_table_entries / (timeout / interval));
i = lopt->clock_hand;
do {
reqp=&lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
int expire = 0, resend = 0;
syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
if (req->rsk_ops->syn_ack_timeout)
req->rsk_ops->syn_ack_timeout(parent, req);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
inet_rsk(req)->acked)) {
unsigned long timeo;
if (req->retrans++ == 0)
lopt->qlen_young--;
timeo = min((timeout << req->retrans), max_rto);
req->expires = now + timeo;
reqp = &req->dl_next;
continue;
}
/* Drop this request */
inet_csk_reqsk_queue_unlink(parent, req, reqp);
reqsk_queue_removed(queue, req);
reqsk_free(req);
continue;
}
reqp = &req->dl_next;
}
i = (i + 1) & (lopt->nr_table_entries - 1);
} while (--budget > 0);
lopt->clock_hand = i;
if (lopt->qlen)
inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
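/*
 * Illustrative sketch of the exponential SYN-ACK backoff computed in the
 * prune loop above (timeout << retrans, clamped to max_rto).  With a 3 second
 * base and a 60 second cap -- example numbers only, not the kernel's
 * tunables -- retransmissions are spaced 6, 12, 24, 48, 60, 60, ... seconds
 * apart as retrans grows from 1.
 */
static unsigned long synack_backoff_demo(unsigned long timeout,
					 unsigned int retrans,
					 unsigned long max_rto)
{
	unsigned long t = timeout << retrans;

	return t < max_rto ? t : max_rto;
}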
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
const gfp_t priority)
{
struct sock *newsk = sk_clone(sk, priority);
if (newsk != NULL) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
newsk->sk_state = TCP_SYN_RECV;
newicsk->icsk_bind_hash = NULL;
inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
newsk->sk_write_space = sk_stream_write_space;
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
security_inet_csk_clone(newsk, req);
}
return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone);
/*
* At this point, there should be no process reference to this
* socket, and thus no user references at all. Therefore we
* can assume the socket waitqueue is inactive and nobody will
* try to jump onto it.
*/
void inet_csk_destroy_sock(struct sock *sk)
{
WARN_ON(sk->sk_state != TCP_CLOSE);
WARN_ON(!sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
WARN_ON(!sk_unhashed(sk));
/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
sk->sk_prot->destroy(sk);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
sk_refcnt_debug_release(sk);
percpu_counter_dec(sk->sk_prot->orphan_count);
sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
if (rc != 0)
return rc;
sk->sk_max_ack_backlog = 0;
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
/* There is a race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters the hash table only
* after validation is complete.
*/
sk->sk_state = TCP_LISTEN;
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
sk_dst_reset(sk);
sk->sk_prot->hash(sk);
return 0;
}
sk->sk_state = TCP_CLOSE;
__reqsk_queue_destroy(&icsk->icsk_accept_queue);
return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
/*
* This routine closes sockets which have been at least partially
* opened, but not yet accepted.
*/
void inet_csk_listen_stop(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock *acc_req;
struct request_sock *req;
inet_csk_delete_keepalive_timer(sk);
/* make all the listen_opt local to us */
acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
/* Following specs, it would be better either to send FIN
* (and enter FIN-WAIT-1, it is normal close)
* or to send active reset (abort).
* Certainly, it is pretty dangerous while synflood, but it is
* bad justification for our negligence 8)
* To be honest, we are not able to make either
* of the variants now. --ANK
*/
reqsk_queue_destroy(&icsk->icsk_accept_queue);
while ((req = acc_req) != NULL) {
struct sock *child = req->sk;
acc_req = req->dl_next;
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
sock_hold(child);
sk->sk_prot->disconnect(child, O_NONBLOCK);
sock_orphan(child);
percpu_counter_inc(sk->sk_prot->orphan_count);
inet_csk_destroy_sock(child);
bh_unlock_sock(child);
local_bh_enable();
sock_put(child);
sk_acceptq_removed(sk);
__reqsk_free(req);
}
WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
const struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = inet->inet_daddr;
sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_getsockopt != NULL)
return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_setsockopt != NULL)
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Support for INET connection oriented protocols.
*
* Authors: See the TCP sources
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or(at your option) any later version.
*/
#include <linux/module.h>
#include <linux/jhash.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
/*
* This struct holds the first and last local port number.
*/
struct local_ports sysctl_local_ports __read_mostly = {
.lock = SEQLOCK_UNLOCKED,
.range = { 32768, 61000 },
};
unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(int *low, int *high)
{
unsigned seq;
do {
seq = read_seqbegin(&sysctl_local_ports.lock);
*low = sysctl_local_ports.range[0];
*high = sysctl_local_ports.range[1];
} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
{
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
/*
* Unlike other sk lookup places we do not check
* for sk_net here, since _all_ the socks listed
* in tb->owners list belong to the same net - the
* one this bucket belongs to.
*/
sk_for_each_bound(sk2, node, &tb->owners) {
if (sk != sk2 &&
!inet_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
break;
}
}
}
return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
/* Obtain a reference to a local port for the given sock;
* if snum is zero, select any available local port.
*/
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct inet_bind_bucket *tb;
int ret, attempts = 5;
struct net *net = sock_net(sk);
int smallest_size = -1, smallest_rover;
local_bh_disable();
if (!snum) {
int remaining, rover, low, high;
again:
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
smallest_rover = rover = net_random() % remaining + low;
smallest_size = -1;
do {
if (inet_is_reserved_local_port(rover))
goto next_nolock;
head = &hashinfo->bhash[inet_bhashfn(net, rover,
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == rover) {
if (tb->fastreuse > 0 &&
sk->sk_reuse &&
sk->sk_state != TCP_LISTEN &&
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
}
}
goto next;
}
break;
next:
spin_unlock(&head->lock);
next_nolock:
if (++rover > high)
rover = low;
} while (--remaining > 0);
/* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash
* locks if this test triggers, because if 'remaining'
* drops to zero, we broke out of the do/while loop at
* the top level, not from the 'break;' statement.
*/
ret = 1;
if (remaining <= 0) {
if (smallest_size != -1) {
snum = smallest_rover;
goto have_snum;
}
goto fail;
}
/* OK, here is the one we will use. HEAD is
* non-NULL and we hold its lock.
*/
snum = rover;
} else {
have_snum:
head = &hashinfo->bhash[inet_bhashfn(net, snum,
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == snum)
goto tb_found;
}
tb = NULL;
goto tb_not_found;
tb_found:
if (!hlist_empty(&tb->owners)) {
if (tb->fastreuse > 0 &&
sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size == -1) {
goto success;
} else {
ret = 1;
if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
smallest_size != -1 && --attempts >= 0) {
spin_unlock(&head->lock);
goto again;
}
goto fail_unlock;
}
}
}
tb_not_found:
ret = 1;
if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
net, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
else
tb->fastreuse = 0;
} else if (tb->fastreuse &&
(!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
tb->fastreuse = 0;
success:
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
ret = 0;
fail_unlock:
spin_unlock(&head->lock);
fail:
local_bh_enable();
return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
/*
* Wait for an incoming connection, avoid race conditions. This must be called
* with the socket locked.
*/
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
struct inet_connection_sock *icsk = inet_csk(sk);
DEFINE_WAIT(wait);
int err;
/*
* True wake-one mechanism for incoming connections: only
* one process gets woken up, not the 'whole herd'.
* Since we do not 'race & poll' for established sockets
* anymore, the common case will execute the loop only once.
*
* Subtle issue: "add_wait_queue_exclusive()" will be added
* after any current non-exclusive waiters, and we know that
* it will always _stay_ after any new non-exclusive waiters
* because all non-exclusive waiters are added at the
* beginning of the wait-queue. As such, it's ok to "drop"
* our exclusiveness temporarily when we get woken up without
* having to remove and re-insert us on the wait queue.
*/
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
if (reqsk_queue_empty(&icsk->icsk_accept_queue))
timeo = schedule_timeout(timeo);
lock_sock(sk);
err = 0;
if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
break;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
break;
err = sock_intr_errno(timeo);
if (signal_pending(current))
break;
err = -EAGAIN;
if (!timeo)
break;
}
finish_wait(sk_sleep(sk), &wait);
return err;
}
/*
* This will accept the next outstanding connection.
*/
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *newsk;
int error;
lock_sock(sk);
/* We need to make sure that this socket is listening,
* and that it has something pending.
*/
error = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out_err;
/* Find already established connection */
if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
/* If this is a non blocking socket don't sleep */
error = -EAGAIN;
if (!timeo)
goto out_err;
error = inet_csk_wait_for_connect(sk, timeo);
if (error)
goto out_err;
}
newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
release_sock(sk);
return newsk;
out_err:
newsk = NULL;
*err = error;
goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
/*
* Using different timers for retransmit, delayed acks and probes.
* We may wish to use just one timer, maintaining a list of expiry
* jiffies, to optimize.
*/
void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(unsigned long),
void (*delack_handler)(unsigned long),
void (*keepalive_handler)(unsigned long))
{
struct inet_connection_sock *icsk = inet_csk(sk);
setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
(unsigned long)sk);
setup_timer(&icsk->icsk_delack_timer, delack_handler,
(unsigned long)sk);
setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
void inet_csk_clear_xmit_timers(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
struct dst_entry *inet_csk_route_req(struct sock *sk,
const struct request_sock *req)
{
struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options_rcu *opt = inet_rsk(req)->opt;
struct net *net = sock_net(sk);
struct flowi4 fl4;
flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt))
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_dst != rt->rt_gateway)
goto route_err;
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
const u32 rnd, const u32 synq_hsize)
{
return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
struct request_sock *inet_csk_search_req(const struct sock *sk,
struct request_sock ***prevp,
const __be16 rport, const __be32 raddr,
const __be32 laddr)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
struct request_sock *req, **prev;
for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
lopt->nr_table_entries)];
(req = *prev) != NULL;
prev = &req->dl_next) {
const struct inet_request_sock *ireq = inet_rsk(req);
if (ireq->rmt_port == rport &&
ireq->rmt_addr == raddr &&
ireq->loc_addr == laddr &&
AF_INET_FAMILY(req->rsk_ops->family)) {
WARN_ON(req->sk);
*prevp = prev;
break;
}
}
return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
lopt->hash_rnd, lopt->nr_table_entries);
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* The only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
const int max_retries,
const u8 rskq_defer_accept,
int *expire, int *resend)
{
if (!rskq_defer_accept) {
*expire = req->retrans >= thresh;
*resend = 1;
return;
}
*expire = req->retrans >= thresh &&
(!inet_rsk(req)->acked || req->retrans >= max_retries);
/*
* Do not resend while waiting for data after ACK;
* start resending at the end of the deferring period, to give
* a last chance for data or an ACK to create an established socket.
*/
*resend = !inet_rsk(req)->acked ||
req->retrans >= rskq_defer_accept - 1;
}
void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long interval,
const unsigned long timeout,
const unsigned long max_rto)
{
struct inet_connection_sock *icsk = inet_csk(parent);
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
struct listen_sock *lopt = queue->listen_opt;
int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries;
unsigned long now = jiffies;
struct request_sock **reqp, *req;
int i, budget;
if (lopt == NULL || lopt->qlen == 0)
return;
/* Normally all the openreqs are young and become mature
* (i.e. are converted to an established socket) within the first timeout.
* If the synack was not acknowledged for 3 seconds, it means
* one of the following things: the synack was lost, the ack was lost,
* rtt is high or nobody planned to ack (i.e. synflood).
* When the server is a bit loaded, the queue is populated with old
* open requests, reducing the effective size of the queue.
* When the server is heavily loaded, the queue size reduces to zero
* after several minutes of work. It is not synflood,
* it is normal operation. The solution is pruning
* entries that are too old, overriding the normal timeout, when
* the situation becomes dangerous.
*
* Essentially, we reserve half of the room for young
* embryos, and abort the old ones without pity if they
* are about to clog our table.
*/
if (lopt->qlen>>(lopt->max_qlen_log-1)) {
int young = (lopt->qlen_young<<1);
while (thresh > 2) {
if (lopt->qlen < young)
break;
thresh--;
young <<= 1;
}
}
if (queue->rskq_defer_accept)
max_retries = queue->rskq_defer_accept;
budget = 2 * (lopt->nr_table_entries / (timeout / interval));
i = lopt->clock_hand;
do {
reqp=&lopt->syn_table[i];
while ((req = *reqp) != NULL) {
if (time_after_eq(now, req->expires)) {
int expire = 0, resend = 0;
syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
if (req->rsk_ops->syn_ack_timeout)
req->rsk_ops->syn_ack_timeout(parent, req);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
inet_rsk(req)->acked)) {
unsigned long timeo;
if (req->retrans++ == 0)
lopt->qlen_young--;
timeo = min((timeout << req->retrans), max_rto);
req->expires = now + timeo;
reqp = &req->dl_next;
continue;
}
/* Drop this request */
inet_csk_reqsk_queue_unlink(parent, req, reqp);
reqsk_queue_removed(queue, req);
reqsk_free(req);
continue;
}
reqp = &req->dl_next;
}
i = (i + 1) & (lopt->nr_table_entries - 1);
} while (--budget > 0);
lopt->clock_hand = i;
if (lopt->qlen)
inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
const gfp_t priority)
{
struct sock *newsk = sk_clone(sk, priority);
if (newsk != NULL) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
newsk->sk_state = TCP_SYN_RECV;
newicsk->icsk_bind_hash = NULL;
inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
newsk->sk_write_space = sk_stream_write_space;
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
security_inet_csk_clone(newsk, req);
}
return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone);
/*
* At this point, there should be no process reference to this
* socket, and thus no user references at all. Therefore we
* can assume the socket waitqueue is inactive and nobody will
* try to jump onto it.
*/
void inet_csk_destroy_sock(struct sock *sk)
{
WARN_ON(sk->sk_state != TCP_CLOSE);
WARN_ON(!sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
WARN_ON(!sk_unhashed(sk));
/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
sk->sk_prot->destroy(sk);
sk_stream_kill_queues(sk);
xfrm_sk_free_policy(sk);
sk_refcnt_debug_release(sk);
percpu_counter_dec(sk->sk_prot->orphan_count);
sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
if (rc != 0)
return rc;
sk->sk_max_ack_backlog = 0;
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
/* There is a race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters the hash table only
* after validation is complete.
*/
sk->sk_state = TCP_LISTEN;
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
sk_dst_reset(sk);
sk->sk_prot->hash(sk);
return 0;
}
sk->sk_state = TCP_CLOSE;
__reqsk_queue_destroy(&icsk->icsk_accept_queue);
return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
/*
* This routine closes sockets which have been at least partially
* opened, but not yet accepted.
*/
void inet_csk_listen_stop(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct request_sock *acc_req;
struct request_sock *req;
inet_csk_delete_keepalive_timer(sk);
/* make all the listen_opt local to us */
acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);
/* Following specs, it would be better either to send FIN
* (and enter FIN-WAIT-1, it is normal close)
* or to send active reset (abort).
* Certainly, it is pretty dangerous while synflood, but it is
* bad justification for our negligence 8)
* To be honest, we are not able to make either
* of the variants now. --ANK
*/
reqsk_queue_destroy(&icsk->icsk_accept_queue);
while ((req = acc_req) != NULL) {
struct sock *child = req->sk;
acc_req = req->dl_next;
local_bh_disable();
bh_lock_sock(child);
WARN_ON(sock_owned_by_user(child));
sock_hold(child);
sk->sk_prot->disconnect(child, O_NONBLOCK);
sock_orphan(child);
percpu_counter_inc(sk->sk_prot->orphan_count);
inet_csk_destroy_sock(child);
bh_unlock_sock(child);
local_bh_enable();
sock_put(child);
sk_acceptq_removed(sk);
__reqsk_free(req);
}
WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
const struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = inet->inet_daddr;
sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_getsockopt != NULL)
return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_setsockopt != NULL)
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
|
134,398,982,668,002 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The options processing module for ip.c
*
* Authors: A.N.Kuznetsov
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/cipso_ipv4.h>
/*
* Write options to IP header, record destination address to
* source route option, address of outgoing interface
* (we should already know it, so that this function is only allowed to be
* called after the routing decision) and timestamp,
* if we originate this datagram.
*
* daddr is real destination address, next hop is recorded in IP header.
* saddr is address of outgoing interface.
*/
void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
__be32 daddr, struct rtable *rt, int is_frag)
{
unsigned char *iph = skb_network_header(skb);
memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
opt = &(IPCB(skb)->opt);
if (opt->srr)
memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4);
if (!is_frag) {
if (opt->rr_needaddr)
ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt);
if (opt->ts_needaddr)
ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
if (opt->ts_needtime) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
}
return;
}
if (opt->rr) {
memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]);
opt->rr = 0;
opt->rr_needaddr = 0;
}
if (opt->ts) {
memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]);
opt->ts = 0;
opt->ts_needaddr = opt->ts_needtime = 0;
}
}
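/*
 * Illustrative userspace sketch of the timestamp value written above: the IP
 * timestamp option carries milliseconds since midnight UT, which is what the
 * (tv.tv_sec % 86400) * MSEC_PER_SEC + ... expression computes.  This
 * standalone version uses clock_gettime() in place of the kernel's
 * getnstimeofday().
 */
#include <stdint.h>
#include <time.h>
#include <arpa/inet.h>

static uint32_t ip_ts_ms_since_midnight(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return htonl((uint32_t)((ts.tv_sec % 86400) * 1000 +
				ts.tv_nsec / 1000000));
}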
/*
* Provided (sopt, skb) points to received options,
* build in dopt a compiled option set appropriate for answering,
* i.e. invert the SRR option, copy the others,
* and grab room in the RR/TS options.
*
* NOTE: dopt cannot point to skb.
*/
int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
{
struct ip_options *sopt;
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
__be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
sopt = &(IPCB(skb)->opt);
if (sopt->optlen == 0) {
dopt->optlen = 0;
return 0;
}
sptr = skb_network_header(skb);
dptr = dopt->__data;
daddr = skb_rtable(skb)->rt_spec_dst;
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
dopt->rr = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->rr, optlen);
if (sopt->rr_needaddr && soffset <= optlen) {
if (soffset + 3 > optlen)
return -EINVAL;
dptr[2] = soffset + 4;
dopt->rr_needaddr = 1;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->ts) {
optlen = sptr[sopt->ts+1];
soffset = sptr[sopt->ts+2];
dopt->ts = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->ts, optlen);
if (soffset <= optlen) {
if (sopt->ts_needaddr) {
if (soffset + 3 > optlen)
return -EINVAL;
dopt->ts_needaddr = 1;
soffset += 4;
}
if (sopt->ts_needtime) {
if (soffset + 3 > optlen)
return -EINVAL;
if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
dopt->ts_needtime = 1;
soffset += 4;
} else {
dopt->ts_needtime = 0;
if (soffset + 7 <= optlen) {
__be32 addr;
memcpy(&addr, dptr+soffset-1, 4);
if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
dopt->ts_needtime = 1;
soffset += 8;
}
}
}
}
dptr[2] = soffset;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->srr) {
unsigned char * start = sptr+sopt->srr;
__be32 faddr;
optlen = start[1];
soffset = start[2];
doffset = 0;
if (soffset > optlen)
soffset = optlen + 1;
soffset -= 4;
if (soffset > 3) {
memcpy(&faddr, &start[soffset-1], 4);
for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
memcpy(&dptr[doffset-1], &start[soffset-1], 4);
/*
* RFC 1812 requires illegal source routes to be fixed.
*/
if (memcmp(&ip_hdr(skb)->saddr,
&start[soffset + 3], 4) == 0)
doffset -= 4;
}
if (doffset > 3) {
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
dptr[1] = doffset+3;
dptr[2] = 4;
dptr += doffset+3;
dopt->srr = dopt->optlen + sizeof(struct iphdr);
dopt->optlen += doffset+3;
dopt->is_strictroute = sopt->is_strictroute;
}
}
if (sopt->cipso) {
optlen = sptr[sopt->cipso+1];
dopt->cipso = dopt->optlen+sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->cipso, optlen);
dptr += optlen;
dopt->optlen += optlen;
}
while (dopt->optlen & 3) {
*dptr++ = IPOPT_END;
dopt->optlen++;
}
return 0;
}
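/*
 * Illustrative sketch of the final padding loop above: IP options must occupy
 * a multiple of four bytes, so the tail is filled with end-of-options bytes.
 * IPOPT_END_DEMO stands in for IPOPT_END (0); the buffer is assumed to have
 * room for up to three extra bytes.
 */
#define IPOPT_END_DEMO 0

static int pad_ip_options_demo(unsigned char *data, int optlen)
{
	while (optlen & 3)
		data[optlen++] = IPOPT_END_DEMO;
	return optlen;		/* now a multiple of 4 */
}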
/*
* Options "fragmenting", just fill options not
* allowed in fragments with NOOPs.
* Simple and stupid 8), but the most efficient way.
*/
void ip_options_fragment(struct sk_buff * skb)
{
unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
struct ip_options * opt = &(IPCB(skb)->opt);
int l = opt->optlen;
int optlen;
while (l > 0) {
switch (*optptr) {
case IPOPT_END:
return;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l)
return;
if (!IPOPT_COPIED(*optptr))
memset(optptr, IPOPT_NOOP, optlen);
l -= optlen;
optptr += optlen;
}
opt->ts = 0;
opt->rr = 0;
opt->rr_needaddr = 0;
opt->ts_needaddr = 0;
opt->ts_needtime = 0;
}
/*
* Verify options and fill pointers in struct options.
* Caller should clear *opt, and set opt->data.
* If opt == NULL, then skb->data should point to IP header.
*/
int ip_options_compile(struct net *net,
struct ip_options * opt, struct sk_buff * skb)
{
int l;
unsigned char * iph;
unsigned char * optptr;
int optlen;
unsigned char * pp_ptr = NULL;
struct rtable *rt = NULL;
if (skb != NULL) {
rt = skb_rtable(skb);
optptr = (unsigned char *)&(ip_hdr(skb)[1]);
} else
optptr = opt->__data;
iph = optptr - sizeof(struct iphdr);
for (l = opt->optlen; l > 0; ) {
switch (*optptr) {
case IPOPT_END:
for (optptr++, l--; l>0; optptr++, l--) {
if (*optptr != IPOPT_END) {
*optptr = IPOPT_END;
opt->is_changed = 1;
}
}
goto eol;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
goto error;
}
switch (*optptr) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
/* NB: cf RFC-1812 5.2.4.1 */
if (opt->srr) {
pp_ptr = optptr;
goto error;
}
if (!skb) {
if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) {
pp_ptr = optptr + 1;
goto error;
}
memcpy(&opt->faddr, &optptr[3], 4);
if (optlen > 7)
memmove(&optptr[3], &optptr[7], optlen-7);
}
opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
opt->srr = optptr - iph;
break;
case IPOPT_RR:
if (opt->rr) {
pp_ptr = optptr;
goto error;
}
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
if (optptr[2]+3 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
opt->rr_needaddr = 1;
}
opt->rr = optptr - iph;
break;
case IPOPT_TIMESTAMP:
if (opt->ts) {
pp_ptr = optptr;
goto error;
}
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 5) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
__be32 *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
switch (optptr[3]&0xF) {
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
opt->ts_needtime = 1;
optptr[2] += 8;
break;
case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
{
__be32 addr;
memcpy(&addr, &optptr[optptr[2]-1], 4);
if (inet_addr_type(net, addr) == RTN_UNICAST)
break;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needtime = 1;
optptr[2] += 8;
break;
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr + 3;
goto error;
}
break;
}
if (timeptr) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(timeptr, &midtime, sizeof(__be32));
opt->is_changed = 1;
}
} else {
unsigned overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
goto error;
}
opt->ts = optptr - iph;
if (skb) {
optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
opt->is_changed = 1;
}
}
break;
case IPOPT_RA:
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] == 0 && optptr[3] == 0)
opt->router_alert = optptr - iph;
break;
case IPOPT_CIPSO:
if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
}
opt->cipso = optptr - iph;
if (cipso_v4_validate(skb, &optptr)) {
pp_ptr = optptr;
goto error;
}
break;
case IPOPT_SEC:
case IPOPT_SID:
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr;
goto error;
}
break;
}
l -= optlen;
optptr += optlen;
}
eol:
if (!pp_ptr)
return 0;
error:
if (skb) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
}
return -EINVAL;
}
EXPORT_SYMBOL(ip_options_compile);
/*
* Undo all the changes done by ip_options_compile().
*/
void ip_options_undo(struct ip_options * opt)
{
if (opt->srr) {
unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr);
memmove(optptr+7, optptr+3, optptr[1]-7);
memcpy(optptr+3, &opt->faddr, 4);
}
if (opt->rr_needaddr) {
unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr);
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
}
if (opt->ts) {
unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr);
if (opt->ts_needtime) {
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC)
optptr[2] -= 4;
}
if (opt->ts_needaddr) {
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
}
}
}
static struct ip_options *ip_options_get_alloc(const int optlen)
{
return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
GFP_KERNEL);
}
static int ip_options_get_finish(struct net *net, struct ip_options **optp,
struct ip_options *opt, int optlen)
{
while (optlen & 3)
opt->__data[optlen++] = IPOPT_END;
opt->optlen = optlen;
if (optlen && ip_options_compile(net, opt, NULL)) {
kfree(opt);
return -EINVAL;
}
kfree(*optp);
*optp = opt;
return 0;
}
int ip_options_get_from_user(struct net *net, struct ip_options **optp,
unsigned char __user *data, int optlen)
{
struct ip_options *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen && copy_from_user(opt->__data, data, optlen)) {
kfree(opt);
return -EFAULT;
}
return ip_options_get_finish(net, optp, opt, optlen);
}
int ip_options_get(struct net *net, struct ip_options **optp,
unsigned char *data, int optlen)
{
struct ip_options *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen)
memcpy(opt->__data, data, optlen);
return ip_options_get_finish(net, optp, opt, optlen);
}
void ip_forward_options(struct sk_buff *skb)
{
struct ip_options * opt = &(IPCB(skb)->opt);
unsigned char * optptr;
struct rtable *rt = skb_rtable(skb);
unsigned char *raw = skb_network_header(skb);
if (opt->rr_needaddr) {
optptr = (unsigned char *)raw + opt->rr;
ip_rt_get_source(&optptr[optptr[2]-5], rt);
opt->is_changed = 1;
}
if (opt->srr_is_hit) {
int srrptr, srrspace;
optptr = raw + opt->srr;
for ( srrptr=optptr[2], srrspace = optptr[1];
srrptr <= srrspace;
srrptr += 4
) {
if (srrptr + 3 > srrspace)
break;
if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0)
break;
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
ip_rt_get_source(&optptr[srrptr-1], rt);
ip_hdr(skb)->daddr = rt->rt_dst;
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
if (opt->ts_needaddr) {
optptr = raw + opt->ts;
ip_rt_get_source(&optptr[optptr[2]-9], rt);
opt->is_changed = 1;
}
}
if (opt->is_changed) {
opt->is_changed = 0;
ip_send_check(ip_hdr(skb));
}
}
int ip_options_rcv_srr(struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
int srrspace, srrptr;
__be32 nexthop;
struct iphdr *iph = ip_hdr(skb);
unsigned char *optptr = skb_network_header(skb) + opt->srr;
struct rtable *rt = skb_rtable(skb);
struct rtable *rt2;
unsigned long orefdst;
int err;
if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
return -EINVAL;
if (rt->rt_type == RTN_UNICAST) {
if (!opt->is_strictroute)
return 0;
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24));
return -EINVAL;
}
if (rt->rt_type != RTN_LOCAL)
return -EINVAL;
for (srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) {
if (srrptr + 3 > srrspace) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24));
return -EINVAL;
}
memcpy(&nexthop, &optptr[srrptr-1], 4);
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
skb->_skb_refdst = orefdst;
return -EINVAL;
}
refdst_drop(orefdst);
if (rt2->rt_type != RTN_LOCAL)
break;
/* Superfast 8) loopback forward */
memcpy(&iph->daddr, &optptr[srrptr-1], 4);
opt->is_changed = 1;
}
if (srrptr <= srrspace) {
opt->srr_is_hit = 1;
opt->is_changed = 1;
}
return 0;
}
EXPORT_SYMBOL(ip_options_rcv_srr);
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The options processing module for ip.c
*
* Authors: A.N.Kuznetsov
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/cipso_ipv4.h>
/*
* Write options to IP header, record destination address to
* source route option, address of outgoing interface
* (we should already know it, so that this function is only allowed to be
* called after the routing decision) and timestamp,
* if we originate this datagram.
*
* daddr is real destination address, next hop is recorded in IP header.
* saddr is address of outgoing interface.
*/
void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
__be32 daddr, struct rtable *rt, int is_frag)
{
unsigned char *iph = skb_network_header(skb);
memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
opt = &(IPCB(skb)->opt);
if (opt->srr)
memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4);
if (!is_frag) {
if (opt->rr_needaddr)
ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt);
if (opt->ts_needaddr)
ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
if (opt->ts_needtime) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
}
return;
}
if (opt->rr) {
memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]);
opt->rr = 0;
opt->rr_needaddr = 0;
}
if (opt->ts) {
memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]);
opt->ts = 0;
opt->ts_needaddr = opt->ts_needtime = 0;
}
}
/*
* Provided (sopt, skb) points to received options,
* build in dopt a compiled option set appropriate for answering,
* i.e. invert the SRR option, copy the others,
* and grab room in the RR/TS options.
*
* NOTE: dopt cannot point to skb.
*/
int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
{
const struct ip_options *sopt;
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
__be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
sopt = &(IPCB(skb)->opt);
if (sopt->optlen == 0)
return 0;
sptr = skb_network_header(skb);
dptr = dopt->__data;
daddr = skb_rtable(skb)->rt_spec_dst;
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
dopt->rr = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->rr, optlen);
if (sopt->rr_needaddr && soffset <= optlen) {
if (soffset + 3 > optlen)
return -EINVAL;
dptr[2] = soffset + 4;
dopt->rr_needaddr = 1;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->ts) {
optlen = sptr[sopt->ts+1];
soffset = sptr[sopt->ts+2];
dopt->ts = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->ts, optlen);
if (soffset <= optlen) {
if (sopt->ts_needaddr) {
if (soffset + 3 > optlen)
return -EINVAL;
dopt->ts_needaddr = 1;
soffset += 4;
}
if (sopt->ts_needtime) {
if (soffset + 3 > optlen)
return -EINVAL;
if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
dopt->ts_needtime = 1;
soffset += 4;
} else {
dopt->ts_needtime = 0;
if (soffset + 7 <= optlen) {
__be32 addr;
memcpy(&addr, dptr+soffset-1, 4);
if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
dopt->ts_needtime = 1;
soffset += 8;
}
}
}
}
dptr[2] = soffset;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->srr) {
unsigned char *start = sptr+sopt->srr;
__be32 faddr;
optlen = start[1];
soffset = start[2];
doffset = 0;
if (soffset > optlen)
soffset = optlen + 1;
soffset -= 4;
if (soffset > 3) {
memcpy(&faddr, &start[soffset-1], 4);
for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
memcpy(&dptr[doffset-1], &start[soffset-1], 4);
/*
* RFC 1812 requires illegal source routes to be fixed.
*/
if (memcmp(&ip_hdr(skb)->saddr,
&start[soffset + 3], 4) == 0)
doffset -= 4;
}
if (doffset > 3) {
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
dptr[1] = doffset+3;
dptr[2] = 4;
dptr += doffset+3;
dopt->srr = dopt->optlen + sizeof(struct iphdr);
dopt->optlen += doffset+3;
dopt->is_strictroute = sopt->is_strictroute;
}
}
if (sopt->cipso) {
optlen = sptr[sopt->cipso+1];
dopt->cipso = dopt->optlen+sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->cipso, optlen);
dptr += optlen;
dopt->optlen += optlen;
}
while (dopt->optlen & 3) {
*dptr++ = IPOPT_END;
dopt->optlen++;
}
return 0;
}
/*
* Options "fragmenting", just fill options not
* allowed in fragments with NOOPs.
* Simple and stupid 8), but the most efficient way.
*/
void ip_options_fragment(struct sk_buff * skb)
{
unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
struct ip_options * opt = &(IPCB(skb)->opt);
int l = opt->optlen;
int optlen;
while (l > 0) {
switch (*optptr) {
case IPOPT_END:
return;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l)
return;
if (!IPOPT_COPIED(*optptr))
memset(optptr, IPOPT_NOOP, optlen);
l -= optlen;
optptr += optlen;
}
opt->ts = 0;
opt->rr = 0;
opt->rr_needaddr = 0;
opt->ts_needaddr = 0;
opt->ts_needtime = 0;
}
/*
* Verify options and fill pointers in struct options.
* Caller should clear *opt, and set opt->data.
* If opt == NULL, then skb->data should point to IP header.
*/
int ip_options_compile(struct net *net,
struct ip_options * opt, struct sk_buff * skb)
{
int l;
unsigned char * iph;
unsigned char * optptr;
int optlen;
unsigned char * pp_ptr = NULL;
struct rtable *rt = NULL;
if (skb != NULL) {
rt = skb_rtable(skb);
optptr = (unsigned char *)&(ip_hdr(skb)[1]);
} else
optptr = opt->__data;
iph = optptr - sizeof(struct iphdr);
for (l = opt->optlen; l > 0; ) {
switch (*optptr) {
case IPOPT_END:
for (optptr++, l--; l>0; optptr++, l--) {
if (*optptr != IPOPT_END) {
*optptr = IPOPT_END;
opt->is_changed = 1;
}
}
goto eol;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
goto error;
}
switch (*optptr) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
/* NB: cf RFC-1812 5.2.4.1 */
if (opt->srr) {
pp_ptr = optptr;
goto error;
}
if (!skb) {
if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) {
pp_ptr = optptr + 1;
goto error;
}
memcpy(&opt->faddr, &optptr[3], 4);
if (optlen > 7)
memmove(&optptr[3], &optptr[7], optlen-7);
}
opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
opt->srr = optptr - iph;
break;
case IPOPT_RR:
if (opt->rr) {
pp_ptr = optptr;
goto error;
}
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
if (optptr[2]+3 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
opt->rr_needaddr = 1;
}
opt->rr = optptr - iph;
break;
case IPOPT_TIMESTAMP:
if (opt->ts) {
pp_ptr = optptr;
goto error;
}
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 5) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
__be32 *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
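/* The low nibble of optptr[3] holds the timestamp flag; the high
nibble is the overflow counter handled further below. */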
switch (optptr[3]&0xF) {
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
opt->ts_needtime = 1;
optptr[2] += 8;
break;
case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
{
__be32 addr;
memcpy(&addr, &optptr[optptr[2]-1], 4);
if (inet_addr_type(net, addr) == RTN_UNICAST)
break;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needtime = 1;
optptr[2] += 8;
break;
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr + 3;
goto error;
}
break;
}
if (timeptr) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
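/* RFC 791 timestamp format: milliseconds since midnight UT. */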
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(timeptr, &midtime, sizeof(__be32));
opt->is_changed = 1;
}
} else {
unsigned overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
goto error;
}
opt->ts = optptr - iph;
if (skb) {
optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
opt->is_changed = 1;
}
}
break;
case IPOPT_RA:
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] == 0 && optptr[3] == 0)
opt->router_alert = optptr - iph;
break;
case IPOPT_CIPSO:
if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
}
opt->cipso = optptr - iph;
if (cipso_v4_validate(skb, &optptr)) {
pp_ptr = optptr;
goto error;
}
break;
case IPOPT_SEC:
case IPOPT_SID:
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr;
goto error;
}
break;
}
l -= optlen;
optptr += optlen;
}
eol:
if (!pp_ptr)
return 0;
error:
if (skb) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
}
return -EINVAL;
}
EXPORT_SYMBOL(ip_options_compile);
/*
* Undo all the changes done by ip_options_compile().
*/
void ip_options_undo(struct ip_options * opt)
{
if (opt->srr) {
unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr);
memmove(optptr+7, optptr+3, optptr[1]-7);
memcpy(optptr+3, &opt->faddr, 4);
}
if (opt->rr_needaddr) {
unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr);
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
}
if (opt->ts) {
unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr);
if (opt->ts_needtime) {
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC)
optptr[2] -= 4;
}
if (opt->ts_needaddr) {
optptr[2] -= 4;
memset(&optptr[optptr[2]-1], 0, 4);
}
}
}
static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
{
return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
GFP_KERNEL);
}
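/* Pad the options with IPOPT_END up to a 4-byte boundary, validate
them by compiling against a NULL skb, and swap the new block into
*optp. */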
static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
struct ip_options_rcu *opt, int optlen)
{
while (optlen & 3)
opt->opt.__data[optlen++] = IPOPT_END;
opt->opt.optlen = optlen;
if (optlen && ip_options_compile(net, &opt->opt, NULL)) {
kfree(opt);
return -EINVAL;
}
kfree(*optp);
*optp = opt;
return 0;
}
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen)
{
struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen && copy_from_user(opt->opt.__data, data, optlen)) {
kfree(opt);
return -EFAULT;
}
return ip_options_get_finish(net, optp, opt, optlen);
}
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
unsigned char *data, int optlen)
{
struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
if (!opt)
return -ENOMEM;
if (optlen)
memcpy(opt->opt.__data, data, optlen);
return ip_options_get_finish(net, optp, opt, optlen);
}
void ip_forward_options(struct sk_buff *skb)
{
struct ip_options * opt = &(IPCB(skb)->opt);
unsigned char * optptr;
struct rtable *rt = skb_rtable(skb);
unsigned char *raw = skb_network_header(skb);
if (opt->rr_needaddr) {
optptr = (unsigned char *)raw + opt->rr;
ip_rt_get_source(&optptr[optptr[2]-5], rt);
opt->is_changed = 1;
}
if (opt->srr_is_hit) {
int srrptr, srrspace;
optptr = raw + opt->srr;
for ( srrptr=optptr[2], srrspace = optptr[1];
srrptr <= srrspace;
srrptr += 4
) {
if (srrptr + 3 > srrspace)
break;
if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0)
break;
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
ip_rt_get_source(&optptr[srrptr-1], rt);
ip_hdr(skb)->daddr = rt->rt_dst;
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
if (opt->ts_needaddr) {
optptr = raw + opt->ts;
ip_rt_get_source(&optptr[optptr[2]-9], rt);
opt->is_changed = 1;
}
}
if (opt->is_changed) {
opt->is_changed = 0;
ip_send_check(ip_hdr(skb));
}
}
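/* Handle a source-route option on a packet received for this host:
pick the next listed hop, route to it, and mark the option as hit. */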
int ip_options_rcv_srr(struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
int srrspace, srrptr;
__be32 nexthop;
struct iphdr *iph = ip_hdr(skb);
unsigned char *optptr = skb_network_header(skb) + opt->srr;
struct rtable *rt = skb_rtable(skb);
struct rtable *rt2;
unsigned long orefdst;
int err;
if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
return -EINVAL;
if (rt->rt_type == RTN_UNICAST) {
if (!opt->is_strictroute)
return 0;
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24));
return -EINVAL;
}
if (rt->rt_type != RTN_LOCAL)
return -EINVAL;
for (srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) {
if (srrptr + 3 > srrspace) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24));
return -EINVAL;
}
memcpy(&nexthop, &optptr[srrptr-1], 4);
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
skb->_skb_refdst = orefdst;
return -EINVAL;
}
refdst_drop(orefdst);
if (rt2->rt_type != RTN_LOCAL)
break;
/* Superfast 8) loopback forward */
memcpy(&iph->daddr, &optptr[srrptr-1], 4);
opt->is_changed = 1;
}
if (srrptr <= srrspace) {
opt->srr_is_hit = 1;
opt->is_changed = 1;
}
return 0;
}
EXPORT_SYMBOL(ip_options_rcv_srr);
|
62,660,429,710,120 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The Internet Protocol (IP) output module.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Donald Becker, <becker@super.org>
* Alan Cox, <Alan.Cox@linux.org>
* Richard Underwood
* Stefan Becker, <stefanb@yello.ping.de>
* Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Hirokazu Takahashi, <taka@valinux.co.jp>
*
* See ip_input.c for original log
*
* Fixes:
* Alan Cox : Missing nonblock feature in ip_build_xmit.
* Mike Kilburn : htons() missing in ip_build_xmit.
* Bradford Johnson: Fix faulty handling of some frames when
* no route is found.
* Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
* (in case the packet is not accepted by
* output firewall rules)
* Mike McLagan : Routing by source
* Alexey Kuznetsov: use new route cache
* Andi Kleen: Fix broken PMTU recovery and remove
* some redundant tests.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Andi Kleen : Replace ip_reply with ip_send_reply.
* Andi Kleen : Split fast and slow ip_build_xmit path
* for decreased register pressure on x86
* and more readability.
* Marc Boucher : When call_out_firewall returns FW_QUEUE,
* silently drop skb instead of failing with -EPERM.
* Detlev Wengorz : Copy protocol for fragments.
* Hirokazu Takahashi: HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi: sendfile() on UDP works now.
*/
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
int __ip_local_out(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = htons(skb->len);
ip_send_check(iph);
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
skb_dst(skb)->dev, dst_output);
}
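/* nf_hook() returns 1 when the LOCAL_OUT hook accepts the packet
without consuming it; in that case hand it to dst_output() ourselves. */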
int ip_local_out(struct sk_buff *skb)
{
int err;
err = __ip_local_out(skb);
if (likely(err == 1))
err = dst_output(skb);
return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
skb_reset_mac_header(newskb);
__skb_pull(newskb, skb_network_offset(newskb));
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
netif_rx_ni(newskb);
return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
int ttl = inet->uc_ttl;
if (ttl < 0)
ttl = ip4_dst_hoplimit(dst);
return ttl;
}
/*
* Add an IP header to an skbuff and send it out.
*
*/
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options *opt)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph;
/* Build the IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
iph->protocol = sk->sk_protocol;
ip_select_ident(iph, &rt->dst, sk);
if (opt && opt->optlen) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt, daddr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/* Send it out. */
return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
if (skb2 == NULL) {
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
kfree_skb(skb);
skb = skb2;
}
if (dst->hh)
return neigh_hh_output(dst->hh, skb);
else if (dst->neighbour)
return dst->neighbour->output(skb);
if (net_ratelimit())
printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
kfree_skb(skb);
return -EINVAL;
}
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm != NULL) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(skb);
}
#endif
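/* Fragment only if the packet exceeds the path MTU and is not GSO;
GSO packets may exceed the MTU because they are segmented later. */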
if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = rt->dst.dev;
/*
* If the indicated interface is up and running, send the packet.
*/
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
/*
* Multicasts are looped back for other local users
*/
if (rt->rt_flags&RTCF_MULTICAST) {
if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
/* Small optimization: do not loop back non-local frames
that were returned after forwarding; they will be dropped
by ip_mr_input in any case.
Note that local frames are looped back so they can be
delivered to local recipients.
This check is duplicated in ip_mr_input at the moment.
*/
&&
((rt->rt_flags & RTCF_LOCAL) ||
!(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
ip_dev_loopback_xmit);
}
/* Multicasts with ttl 0 must not go beyond the host */
if (ip_hdr(skb)->ttl == 0) {
kfree_skb(skb);
return 0;
}
}
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
NULL, newskb->dev, ip_dev_loopback_xmit);
}
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
skb->dev, ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev;
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt = inet->opt;
struct rtable *rt;
struct iphdr *iph;
int res;
/* Skip all of this if the packet is already routed,
* e.g. by something like SCTP.
*/
rcu_read_lock();
rt = skb_rtable(skb);
if (rt != NULL)
goto packet_routed;
/* Make sure we can route this packet. */
rt = (struct rtable *)__sk_dst_check(sk, 0);
if (rt == NULL) {
__be32 daddr;
/* Use correct destination address if we have options. */
daddr = inet->inet_daddr;
if(opt && opt->srr)
daddr = opt->faddr;
/* If this fails, the transport layer's retransmit mechanism will
* keep trying until a route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), sk,
daddr, inet->inet_saddr,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
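/* Build the first 16 bits of the header in one store: version 4,
ihl 5, then the TOS byte. */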
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
/* The transport layer sets skb->h.foo itself. */
if (opt && opt->optlen) {
iph->ihl += opt->optlen >> 2;
ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
}
ip_select_ident_more(iph, &rt->dst, sk,
(skb_shinfo(skb)->gso_segs ?: 1) - 1);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
res = ip_local_out(skb);
rcu_read_unlock();
return res;
no_route:
rcu_read_unlock();
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
to->mark = from->mark;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
}
/*
* This IP datagram is too large to be sent in one piece. Break it up into
* smaller pieces (each consisting of an IP header plus a block of
* the original datagram's data) that will still fit in a
* single device frame, and queue each such frame for sending.
*/
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct iphdr *iph;
int ptr;
struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
__be16 not_last_frag;
struct rtable *rt = skb_rtable(skb);
int err = 0;
dev = rt->dst.dev;
/*
* Point into the IP datagram header.
*/
iph = ip_hdr(skb);
if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(ip_skb_dst_mtu(skb)));
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Setup starting values.
*/
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When a frag_list is given, use it. First, check its validity:
* some transformers could create a wrong frag_list or break an
* existing one; that is not prohibited. In such cases fall back to copying.
*
* LATER: this step can be merged into the real generation of fragments;
* we can switch to copying when we see the first bad fragment.
*/
if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
(iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
err = 0;
offset = 0;
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
ip_send_check(iph);
for (;;) {
/* Prepare the header of the next frame
* before the previous one goes down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), iph, hlen);
iph = ip_hdr(frag);
iph->tot_len = htons(frag->len);
ip_copy_metadata(frag, skb);
if (offset == 0)
ip_options_fragment(frag);
offset += skb->len - hlen;
iph->frag_off = htons(offset>>3);
if (frag->next != NULL)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
err = output(skb);
if (!err)
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
break;
skb = frag;
frag = skb->next;
skb->next = NULL;
}
if (err == 0) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return 0;
}
while (frag) {
skb = frag->next;
kfree_skb(frag);
frag = skb;
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
/* for bridged IP traffic encapsulated inside e.g. a VLAN header,
* we need to make room for the encapsulating header
*/
ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
/*
* Fragment the datagram.
*/
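/* The fragment offset field counts 8-byte units, hence the shifts
by 3 below. */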
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
*/
while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
/* IF: we are not sending up to and including the end of the packet,
then align the next fragment's start on an eight-byte boundary */
if (len < left) {
len &= ~7;
}
/*
* Allocate buffer.
*/
if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
err = -ENOMEM;
goto fail;
}
/*
* Set up data on packet
*/
ip_copy_metadata(skb2, skb);
skb_reserve(skb2, ll_rs);
skb_put(skb2, len + hlen);
skb_reset_network_header(skb2);
skb2->transport_header = skb2->network_header + hlen;
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
/*
* Copy a block of the IP datagram.
*/
if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
BUG();
left -= len;
/*
* Fill in the new header fields.
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((offset >> 3));
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
* on the initial skb, so that all the following fragments
* will inherit fixed options.
*/
if (offset == 0)
ip_options_fragment(skb);
/*
* Added AC : If we are fragmenting a fragment that's not the
* last fragment, then keep the MF bit set on each fragment
*/
if (left > 0 || not_last_frag)
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
/*
* Put this fragment into the sending queue.
*/
iph->tot_len = htons(len + hlen);
ip_send_check(iph);
err = output(skb2);
if (err)
goto fail;
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
}
EXPORT_SYMBOL(ip_fragment);
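/* Generic getfrag callback: copy user iovec data into a fragment,
accumulating a software checksum in skb->csum unless the hardware
will checksum the packet (CHECKSUM_PARTIAL). */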
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
struct iovec *iov = from;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (memcpy_fromiovecend(to, iov, offset, len) < 0)
return -EFAULT;
} else {
__wsum csum = 0;
if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, odd);
}
return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
char *kaddr;
__wsum csum;
kaddr = kmap(page);
csum = csum_partial(kaddr + offset, copy, 0);
kunmap(page);
return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
struct sk_buff_head *queue,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int mtu, unsigned int flags)
{
struct sk_buff *skb;
int err;
/* The network device supports UDP fragmentation offload,
* so create one single skb containing the complete
* UDP datagram
*/
if ((skb = skb_peek_tail(queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
/* specify the length of each IP datagram fragment */
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
__skb_queue_tail(queue, skb);
}
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
struct inet_cork *cork,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
exthdrlen = transhdrlen ? rt->dst.header_len : 0;
length += exthdrlen;
transhdrlen += exthdrlen;
mtu = cork->fragsize;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
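/* Every fragment but the last must carry a multiple of 8 bytes of
payload, so round the usable space down. */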
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
}
/*
* transhdrlen > 0 means that this is the first fragment and we
* would prefer that it not be fragmented in the future.
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
skb = skb_peek_tail(queue);
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
mtu, flags);
if (err)
goto error;
return 0;
}
/* So, what's going on in the loop below?
*
* We use the calculated fragment length to generate a chain of skbs;
* each segment is an IP fragment ready for sending to the network once
* an appropriate IP header has been added.
*/
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = fraglen;
/* The last fragment gets additional space at tail.
* Note that with MSG_MORE we overallocate on fragments,
* because we have no idea which fragment will be
* the last.
*/
if (datalen == length + fraggap) {
alloclen += rt->dst.trailer_len;
/* make sure mtu is not reached */
if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
datalen -= ALIGN(rt->dst.trailer_len, 8);
}
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
else
/* only the initial fragment is
time stamped */
cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen;
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
struct page *page = cork->page;
int off = cork->off;
unsigned int left;
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
if (page != frag->page) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
get_page(page);
skb_fill_page_desc(skb, i, page, off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
}
cork->page = page;
cork->off = 0;
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
} else {
err = -EMSGSIZE;
goto error;
}
if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
cork->off += copy;
frag->size += copy;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
struct ipcm_cookie *ipc, struct rtable **rtp)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt;
struct rtable *rt;
/*
* setup for corking.
*/
opt = ipc->opt;
if (opt) {
if (cork->opt == NULL) {
cork->opt = kmalloc(sizeof(struct ip_options) + 40,
sk->sk_allocation);
if (unlikely(cork->opt == NULL))
return -ENOBUFS;
}
memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
cork->flags |= IPCORK_OPT;
cork->addr = ipc->addr;
}
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* We steal the reference to this route; the caller must not release it
*/
*rtp = NULL;
cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(rt->dst.path);
cork->dst = &rt->dst;
cork->length = 0;
cork->tx_flags = ipc->tx_flags;
cork->page = NULL;
cork->off = 0;
return 0;
}
/*
* ip_append_data() and ip_append_page() can make one large IP datagram
* from many pieces of data. Each piece is held on the socket
* until ip_push_pending_frames() is called. Each piece can be a page
* or non-page data.
*
* Not only UDP; other transport protocols - e.g. raw sockets - can
* potentially use this interface.
*
* LATER: the length must be adjusted for tail padding when required.
*/
int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
int err;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue)) {
err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
if (err)
return err;
} else {
transhdrlen = 0;
}
return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
from, length, transhdrlen, flags);
}
ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct rtable *rt;
struct ip_options *opt = NULL;
int hh_len;
int mtu;
int len;
int err;
unsigned int maxfraglen, fragheaderlen, fraggap;
if (inet->hdrincl)
return -EPERM;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
rt = (struct rtable *)inet->cork.dst;
if (inet->cork.flags & IPCORK_OPT)
opt = inet->cork.opt;
if (!(rt->dst.dev->features&NETIF_F_SG))
return -EOPNOTSUPP;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
mtu = inet->cork.fragsize;
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
return -EMSGSIZE;
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
inet->cork.length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
while (size > 0) {
int i;
if (skb_is_gso(skb))
len = size;
else {
/* Check if the remaining data fits into current packet. */
len = mtu - skb->len;
if (len < size)
len = maxfraglen - skb->len;
}
if (len <= 0) {
struct sk_buff *skb_prev;
int alloclen;
skb_prev = skb;
fraggap = skb_prev->len - maxfraglen;
alloclen = fragheaderlen + hh_len + fraggap + 15;
skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
}
/*
* Fill in the control structures
*/
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
skb_put(skb, fragheaderlen + fraggap);
skb_reset_network_header(skb);
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
skb_transport_header(skb),
fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
pskb_trim_unique(skb_prev, maxfraglen);
}
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
i = skb_shinfo(skb)->nr_frags;
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
skb_shinfo(skb)->frags[i-1].size += len;
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
} else {
err = -EMSGSIZE;
goto error;
}
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum;
csum = csum_page(page, offset, len);
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
skb->len += len;
skb->data_len += len;
skb->truesize += len;
atomic_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
}
return 0;
error:
inet->cork.length -= size;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
static void ip_cork_release(struct inet_cork *cork)
{
cork->flags &= ~IPCORK_OPT;
kfree(cork->opt);
cork->opt = NULL;
dst_release(cork->dst);
cork->dst = NULL;
}
/*
* Combine all pending IP fragments on the socket into one IP datagram
* and push them out.
*/
struct sk_buff *__ip_make_skb(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
if ((skb = __skb_dequeue(queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
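/* Splice all remaining queued skbs onto the first skb's frag_list;
together they form one IP datagram. */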
while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
/* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO), we allow
* the frame generated here to be fragmented. No matter how transforms
* change the size of the packet, it will go out.
*/
if (inet->pmtudisc < IP_PMTUDISC_DO)
skb->local_df = 1;
/* DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow this frame to be fragmented
* locally. */
if (inet->pmtudisc >= IP_PMTUDISC_DO ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = (struct iphdr *)skb->data;
iph->version = 4;
iph->ihl = 5;
if (opt) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt, cork->addr, rt, 0);
}
iph->tos = inet->tos;
iph->frag_off = df;
ip_select_ident(iph, &rt->dst, sk);
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
ip_cork_release(cork);
out:
return skb;
}
int ip_send_skb(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
int err;
err = ip_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
}
return err;
}
int ip_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
skb = ip_finish_skb(sk);
if (!skb)
return 0;
/* Netfilter gets the whole, not-yet-fragmented skb. */
return ip_send_skb(skb);
}
/*
* Throw away all pending data on the socket.
*/
static void __ip_flush_pending_frames(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(queue)) != NULL)
kfree_skb(skb);
ip_cork_release(cork);
}
void ip_flush_pending_frames(struct sock *sk)
{
__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
struct sk_buff *ip_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_cork cork = {};
struct sk_buff_head queue;
int err;
if (flags & MSG_PROBE)
return NULL;
__skb_queue_head_init(&queue);
err = ip_setup_cork(sk, &cork, ipc, rtp);
if (err)
return ERR_PTR(err);
err = __ip_append_data(sk, &queue, &cork, getfrag,
from, length, transhdrlen, flags);
if (err) {
__ip_flush_pending_frames(sk, &queue, &cork);
return ERR_PTR(err);
}
return __ip_make_skb(sk, &queue, &cork);
}
/*
* Fetch data from kernel space and fill in checksum if needed.
*/
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
__wsum csum;
csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
skb->csum = csum_block_add(skb->csum, csum, odd);
return 0;
}
/*
* Generic function to send a packet as a reply to another packet.
* Used to send TCP resets so far. ICMP should use this function too.
*
* Should run single-threaded per socket because it uses the sock
* structure to pass arguments.
*/
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct {
struct ip_options opt;
char data[40];
} replyopts;
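/* The 40-byte buffer is the maximum room IP options can occupy:
a 60-byte maximal header minus the 20-byte fixed part. */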
struct ipcm_cookie ipc;
__be32 daddr;
struct rtable *rt = skb_rtable(skb);
if (ip_options_echo(&replyopts.opt, skb))
return;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
if (ipc.opt->srr)
daddr = replyopts.opt.faddr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(ip_hdr(skb)->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt))
return;
}
/* And let IP do all the hard work.
This chunk is not reentrant, hence the spinlock.
Note that it relies on the fact that this function is called
with BH disabled locally and that sk cannot already be spinlocked.
*/
bh_lock_sock(sk);
inet->tos = ip_hdr(skb)->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(skb) +
arg->csumoffset) = csum_fold(csum_add(skb->csum,
arg->csum));
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
bh_unlock_sock(sk);
ip_rt_put(rt);
}
void __init ip_init(void)
{
ip_rt_init();
inet_initpeers();
#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
igmp_mc_proc_init();
#endif
}
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The Internet Protocol (IP) output module.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Donald Becker, <becker@super.org>
* Alan Cox, <Alan.Cox@linux.org>
* Richard Underwood
* Stefan Becker, <stefanb@yello.ping.de>
* Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Hirokazu Takahashi, <taka@valinux.co.jp>
*
* See ip_input.c for original log
*
* Fixes:
* Alan Cox : Missing nonblock feature in ip_build_xmit.
* Mike Kilburn : htons() missing in ip_build_xmit.
* Bradford Johnson: Fix faulty handling of some frames when
* no route is found.
* Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
* (in case the packet is not accepted by
* output firewall rules)
* Mike McLagan : Routing by source
* Alexey Kuznetsov: use new route cache
* Andi Kleen: Fix broken PMTU recovery and remove
* some redundant tests.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Andi Kleen : Replace ip_reply with ip_send_reply.
* Andi Kleen : Split fast and slow ip_build_xmit path
* for decreased register pressure on x86
* and more readability.
* Marc Boucher : When call_out_firewall returns FW_QUEUE,
* silently drop skb instead of failing with -EPERM.
* Detlev Wengorz : Copy protocol for fragments.
* Hirokazu Takahashi: HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi: sendfile() on UDP works now.
*/
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
int __ip_local_out(struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = htons(skb->len);
ip_send_check(iph);
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
skb_dst(skb)->dev, dst_output);
}
int ip_local_out(struct sk_buff *skb)
{
int err;
err = __ip_local_out(skb);
if (likely(err == 1))
err = dst_output(skb);
return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
skb_reset_mac_header(newskb);
__skb_pull(newskb, skb_network_offset(newskb));
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
netif_rx_ni(newskb);
return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
int ttl = inet->uc_ttl;
if (ttl < 0)
ttl = ip4_dst_hoplimit(dst);
return ttl;
}
/*
* Add an IP header to an skbuff and send it out.
*
*/
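/* Note: opt is now the RCU-managed wrapper (struct ip_options_rcu);
the actual options live in opt->opt. */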
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph;
/* Build the IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = rt->rt_dst;
iph->saddr = rt->rt_src;
iph->protocol = sk->sk_protocol;
ip_select_ident(iph, &rt->dst, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
ip_options_build(skb, &opt->opt, daddr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/* Send it out. */
return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
if (skb2 == NULL) {
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
kfree_skb(skb);
skb = skb2;
}
if (dst->hh)
return neigh_hh_output(dst->hh, skb);
else if (dst->neighbour)
return dst->neighbour->output(skb);
if (net_ratelimit())
printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
kfree_skb(skb);
return -EINVAL;
}
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm != NULL) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(skb);
}
#endif
if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct rtable *rt = skb_rtable(skb);
struct net_device *dev = rt->dst.dev;
/*
* If the indicated interface is up and running, send the packet.
*/
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
/*
* Multicasts are looped back for other local users
*/
if (rt->rt_flags&RTCF_MULTICAST) {
if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
/* Small optimization: do not loop back non-local frames
that were returned after forwarding; they will be dropped
by ip_mr_input in any case.
Note that local frames are looped back so they can be
delivered to local recipients.
This check is duplicated in ip_mr_input at the moment.
*/
&&
((rt->rt_flags & RTCF_LOCAL) ||
!(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
ip_dev_loopback_xmit);
}
/* Multicasts with ttl 0 must not go beyond the host */
if (ip_hdr(skb)->ttl == 0) {
kfree_skb(skb);
return 0;
}
}
if (rt->rt_flags&RTCF_BROADCAST) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
NULL, newskb->dev, ip_dev_loopback_xmit);
}
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
skb->dev, ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev;
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
ip_finish_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ip_options_rcu *inet_opt;
struct rtable *rt;
struct iphdr *iph;
int res;
/* Skip all of this if the packet is already routed,
* e.g. by something like SCTP.
*/
rcu_read_lock();
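/* The socket's options are now RCU-protected; fetch them under
rcu_read_lock(). */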
inet_opt = rcu_dereference(inet->inet_opt);
rt = skb_rtable(skb);
if (rt != NULL)
goto packet_routed;
/* Make sure we can route this packet. */
rt = (struct rtable *)__sk_dst_check(sk, 0);
if (rt == NULL) {
__be32 daddr;
/* Use correct destination address if we have options. */
daddr = inet->inet_daddr;
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* If this fails, the transport layer's retransmit mechanism will
* keep trying until a route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), sk,
daddr, inet->inet_saddr,
inet->inet_dport,
inet->inet_sport,
sk->sk_protocol,
RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_dst != rt->rt_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
/* The transport layer sets skb->h.foo itself. */
if (inet_opt && inet_opt->opt.optlen) {
iph->ihl += inet_opt->opt.optlen >> 2;
ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
}
ip_select_ident_more(iph, &rt->dst, sk,
(skb_shinfo(skb)->gso_segs ?: 1) - 1);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
res = ip_local_out(skb);
rcu_read_unlock();
return res;
no_route:
rcu_read_unlock();
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
to->mark = from->mark;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
}
/*
* This IP datagram is too large to be sent in one piece. Break it up into
* smaller pieces (each consisting of an IP header plus a block of
* the original datagram's data) that will still fit in a
* single device frame, and queue each such frame for sending.
*/
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct iphdr *iph;
int ptr;
struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
__be16 not_last_frag;
struct rtable *rt = skb_rtable(skb);
int err = 0;
dev = rt->dst.dev;
/*
* Point into the IP datagram header.
*/
iph = ip_hdr(skb);
if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(ip_skb_dst_mtu(skb)));
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Setup starting values.
*/
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When a frag_list is given, use it. First, check its validity:
* some transformers could create a wrong frag_list or break an
* existing one; that is not prohibited. In such cases fall back to copying.
*
* LATER: this step can be merged into the real generation of fragments;
* we can switch to copying when we see the first bad fragment.
*/
if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
(iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
err = 0;
offset = 0;
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
ip_send_check(iph);
for (;;) {
/* Prepare the header of the next frame
* before the previous one goes down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), iph, hlen);
iph = ip_hdr(frag);
iph->tot_len = htons(frag->len);
ip_copy_metadata(frag, skb);
if (offset == 0)
ip_options_fragment(frag);
offset += skb->len - hlen;
iph->frag_off = htons(offset>>3);
if (frag->next != NULL)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
err = output(skb);
if (!err)
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
break;
skb = frag;
frag = skb->next;
skb->next = NULL;
}
if (err == 0) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return 0;
}
while (frag) {
skb = frag->next;
kfree_skb(frag);
frag = skb;
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
/* for bridged IP traffic encapsulated inside e.g. a VLAN header,
* we need to make room for the encapsulating header
*/
ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
/*
* Fragment the datagram.
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
*/
while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
/* IF: we are not sending up to and including the end of the packet,
then align the next fragment's start on an eight-byte boundary */
if (len < left) {
len &= ~7;
}
/*
* Allocate buffer.
*/
if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
err = -ENOMEM;
goto fail;
}
/*
* Set up data on packet
*/
ip_copy_metadata(skb2, skb);
skb_reserve(skb2, ll_rs);
skb_put(skb2, len + hlen);
skb_reset_network_header(skb2);
skb2->transport_header = skb2->network_header + hlen;
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
/*
* Copy a block of the IP datagram.
*/
if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
BUG();
left -= len;
/*
* Fill in the new header fields.
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((offset >> 3));
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
* on the initial skb, so that all the following fragments
* will inherit fixed options.
*/
if (offset == 0)
ip_options_fragment(skb);
/*
* Added AC : If we are fragmenting a fragment that's not the
* last fragment then keep the MF bit set on each fragment
*/
if (left > 0 || not_last_frag)
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
/*
* Put this fragment into the sending queue.
*/
iph->tot_len = htons(len + hlen);
ip_send_check(iph);
err = output(skb2);
if (err)
goto fail;
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
}
EXPORT_SYMBOL(ip_fragment);
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
struct iovec *iov = from;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (memcpy_fromiovecend(to, iov, offset, len) < 0)
return -EFAULT;
} else {
__wsum csum = 0;
if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, odd);
}
return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
char *kaddr;
__wsum csum;
kaddr = kmap(page);
csum = csum_partial(kaddr + offset, copy, 0);
kunmap(page);
return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
struct sk_buff_head *queue,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int mtu, unsigned int flags)
{
struct sk_buff *skb;
int err;
/* There is support for UDP fragmentation offload by the network
* device, so create one single skb containing the complete
* UDP datagram
*/
if ((skb = skb_peek_tail(queue)) == NULL) {
skb = sock_alloc_send_skb(sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
/* specify the length of each IP datagram fragment */
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
__skb_queue_tail(queue, skb);
}
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
}
static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
struct inet_cork *cork,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
exthdrlen = transhdrlen ? rt->dst.header_len : 0;
length += exthdrlen;
transhdrlen += exthdrlen;
mtu = cork->fragsize;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
}
/*
* transhdrlen > 0 means that this is the first fragment and we want
* it not to be fragmented later.
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
skb = skb_peek_tail(queue);
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
mtu, flags);
if (err)
goto error;
return 0;
}
/* So, what's going on in the loop below?
*
* We use the calculated fragment length to generate a chain of skbs;
* each segment is an IP fragment ready for sending to the network
* once the appropriate IP header has been added.
*/
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = fraglen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
if (datalen == length + fraggap) {
alloclen += rt->dst.trailer_len;
/* make sure mtu is not reached */
if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
datalen -= ALIGN(rt->dst.trailer_len, 8);
}
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
else
/* only the initial fragment is
time stamped */
cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen;
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
struct page *page = cork->page;
int off = cork->off;
unsigned int left;
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
if (page != frag->page) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
get_page(page);
skb_fill_page_desc(skb, i, page, off, 0);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
if (copy > PAGE_SIZE)
copy = PAGE_SIZE;
page = alloc_pages(sk->sk_allocation, 0);
if (page == NULL) {
err = -ENOMEM;
goto error;
}
cork->page = page;
cork->off = 0;
skb_fill_page_desc(skb, i, page, 0, 0);
frag = &skb_shinfo(skb)->frags[i];
} else {
err = -EMSGSIZE;
goto error;
}
if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
cork->off += copy;
frag->size += copy;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
struct ipcm_cookie *ipc, struct rtable **rtp)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_options_rcu *opt;
struct rtable *rt;
/*
* setup for corking.
*/
opt = ipc->opt;
if (opt) {
if (cork->opt == NULL) {
cork->opt = kmalloc(sizeof(struct ip_options) + 40,
sk->sk_allocation);
if (unlikely(cork->opt == NULL))
return -ENOBUFS;
}
memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
cork->flags |= IPCORK_OPT;
cork->addr = ipc->addr;
}
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* We steal the reference to this route; the caller should not release it
*/
*rtp = NULL;
cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
rt->dst.dev->mtu : dst_mtu(rt->dst.path);
cork->dst = &rt->dst;
cork->length = 0;
cork->tx_flags = ipc->tx_flags;
cork->page = NULL;
cork->off = 0;
return 0;
}
/*
* ip_append_data() and ip_append_page() can make one large IP datagram
* from many pieces of data. Each piece is held on the socket
* until ip_push_pending_frames() is called. Each piece can be a page
* or non-page data.
*
* Not only UDP but other transport protocols - e.g. raw sockets - can
* potentially use this interface (see the illustrative sketch after
* ip_append_data() below).
*
* LATER: length must be adjusted by the pad at the tail, when required.
*/
int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
int err;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue)) {
err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
if (err)
return err;
} else {
transhdrlen = 0;
}
return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
from, length, transhdrlen, flags);
}
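/*
 * Illustrative sketch, not part of the original source: how a
 * datagram-style sender might drive the corking API above.
 * ip_generic_getfrag() is passed as the getfrag callback with
 * msg->msg_iov as its cookie; routing and ipcm_cookie setup are
 * assumed to have been done by the caller, and example_dgram_sendmsg()
 * is a hypothetical name.
 */
static int example_dgram_sendmsg(struct sock *sk, struct msghdr *msg,
size_t len, struct ipcm_cookie *ipc,
struct rtable **rtp)
{
int err;
/* Append the user data to the per-socket pending queue. */
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
0, ipc, rtp, msg->msg_flags);
if (err)
/* Drop everything queued so far on failure. */
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
/* No more data expected: add headers and transmit. */
err = ip_push_pending_frames(sk);
return err;
}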
ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct rtable *rt;
struct ip_options *opt = NULL;
int hh_len;
int mtu;
int len;
int err;
unsigned int maxfraglen, fragheaderlen, fraggap;
if (inet->hdrincl)
return -EPERM;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
rt = (struct rtable *)inet->cork.dst;
if (inet->cork.flags & IPCORK_OPT)
opt = inet->cork.opt;
if (!(rt->dst.dev->features&NETIF_F_SG))
return -EOPNOTSUPP;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
mtu = inet->cork.fragsize;
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
return -EMSGSIZE;
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
inet->cork.length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
while (size > 0) {
int i;
if (skb_is_gso(skb))
len = size;
else {
/* Check if the remaining data fits into current packet. */
len = mtu - skb->len;
if (len < size)
len = maxfraglen - skb->len;
}
if (len <= 0) {
struct sk_buff *skb_prev;
int alloclen;
skb_prev = skb;
fraggap = skb_prev->len - maxfraglen;
alloclen = fragheaderlen + hh_len + fraggap + 15;
skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
}
/*
* Fill in the control structures
*/
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
skb_put(skb, fragheaderlen + fraggap);
skb_reset_network_header(skb);
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
skb_transport_header(skb),
fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
pskb_trim_unique(skb_prev, maxfraglen);
}
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
i = skb_shinfo(skb)->nr_frags;
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
skb_shinfo(skb)->frags[i-1].size += len;
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
} else {
err = -EMSGSIZE;
goto error;
}
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum;
csum = csum_page(page, offset, len);
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
skb->len += len;
skb->data_len += len;
skb->truesize += len;
atomic_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
}
return 0;
error:
inet->cork.length -= size;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
static void ip_cork_release(struct inet_cork *cork)
{
cork->flags &= ~IPCORK_OPT;
kfree(cork->opt);
cork->opt = NULL;
dst_release(cork->dst);
cork->dst = NULL;
}
/*
* Combine all pending IP fragments on the socket into one IP datagram
* and push them out.
*/
struct sk_buff *__ip_make_skb(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
if ((skb = __skb_dequeue(queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
* allow fragmenting the frame generated here. No matter how transforms
* change the size of the packet, it will come out.
*/
if (inet->pmtudisc < IP_PMTUDISC_DO)
skb->local_df = 1;
/* The DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow this frame to be
* fragmented locally. */
if (inet->pmtudisc >= IP_PMTUDISC_DO ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = (struct iphdr *)skb->data;
iph->version = 4;
iph->ihl = 5;
if (opt) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt, cork->addr, rt, 0);
}
iph->tos = inet->tos;
iph->frag_off = df;
ip_select_ident(iph, &rt->dst, sk);
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
iph->saddr = rt->rt_src;
iph->daddr = rt->rt_dst;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
ip_cork_release(cork);
out:
return skb;
}
int ip_send_skb(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
int err;
err = ip_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
}
return err;
}
int ip_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
skb = ip_finish_skb(sk);
if (!skb)
return 0;
/* Netfilter gets the whole, not yet fragmented skb. */
return ip_send_skb(skb);
}
/*
* Throw away all pending data on the socket.
*/
static void __ip_flush_pending_frames(struct sock *sk,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(queue)) != NULL)
kfree_skb(skb);
ip_cork_release(cork);
}
void ip_flush_pending_frames(struct sock *sk)
{
__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
}
struct sk_buff *ip_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_cork cork = {};
struct sk_buff_head queue;
int err;
if (flags & MSG_PROBE)
return NULL;
__skb_queue_head_init(&queue);
err = ip_setup_cork(sk, &cork, ipc, rtp);
if (err)
return ERR_PTR(err);
err = __ip_append_data(sk, &queue, &cork, getfrag,
from, length, transhdrlen, flags);
if (err) {
__ip_flush_pending_frames(sk, &queue, &cork);
return ERR_PTR(err);
}
return __ip_make_skb(sk, &queue, &cork);
}
/*
* Fetch data from kernel space and fill in checksum if needed.
*/
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
int len, int odd, struct sk_buff *skb)
{
__wsum csum;
csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
skb->csum = csum_block_add(skb->csum, csum, odd);
return 0;
}
/*
* Generic function to send a packet as a reply to another packet.
* Used to send TCP resets so far. ICMP should use this function too.
*
* Should run single-threaded per socket because it uses the sock
* structure to pass arguments.
*/
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_options_data replyopts;
struct ipcm_cookie ipc;
__be32 daddr;
struct rtable *rt = skb_rtable(skb);
if (ip_options_echo(&replyopts.opt.opt, skb))
return;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
ipc.tx_flags = 0;
if (replyopts.opt.opt.optlen) {
ipc.opt = &replyopts.opt;
if (replyopts.opt.opt.srr)
daddr = replyopts.opt.opt.faddr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(ip_hdr(skb)->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt))
return;
}
/* And let IP do all the hard work.
This chunk is not reentrant, hence the spinlock.
Note that it relies on the fact that this function is called
with BHs locally disabled and that sk cannot already be spinlocked.
*/
bh_lock_sock(sk);
inet->tos = ip_hdr(skb)->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
if (arg->csumoffset >= 0)
*((__sum16 *)skb_transport_header(skb) +
arg->csumoffset) = csum_fold(csum_add(skb->csum,
arg->csum));
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
bh_unlock_sock(sk);
ip_rt_put(rt);
}
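/*
 * Illustrative sketch, not part of the original source: the calling
 * convention for ip_send_reply(), loosely modelled on the TCP reset
 * path. The reply header 'rep', its length 'replen' and the control
 * socket 'ctl_sk' are assumed to be prepared by the caller.
 */
static void example_send_reply(struct sock *ctl_sk, struct sk_buff *skb,
struct tcphdr *rep, unsigned int replen)
{
struct ip_reply_arg arg;
memset(&arg, 0, sizeof(arg));
arg.iov[0].iov_base = rep; /* copied by ip_reply_glue_bits() */
arg.iov[0].iov_len = replen;
/* Seed with the pseudo-header sum; the addresses are swapped
* because the reply travels in the opposite direction. */
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
replen, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
ip_send_reply(ctl_sk, skb, &arg, replen);
}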
void __init ip_init(void)
{
ip_rt_init();
inet_initpeers();
#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
igmp_mc_proc_init();
#endif
}
|
124,779,137,280,043 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The IP to API glue.
*
* Authors: see ip.c
*
* Fixes:
* Many : Split from ip.c , see ip.c for history.
* Martin Mares : TOS setting fixed.
* Alan Cox : Fixed a couple of oopses in Martin's
* TOS tweaks.
* Mike McLagan : Routing by source
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/transp_v6.h>
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#define IP_CMSG_PKTINFO 1
#define IP_CMSG_TTL 2
#define IP_CMSG_TOS 4
#define IP_CMSG_RECVOPTS 8
#define IP_CMSG_RETOPTS 16
#define IP_CMSG_PASSSEC 32
#define IP_CMSG_ORIGDSTADDR 64
/*
* SOL_IP control messages.
*/
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct in_pktinfo info;
struct rtable *rt = skb_rtable(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
if (rt) {
info.ipi_ifindex = rt->rt_iif;
info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
} else {
info.ipi_ifindex = 0;
info.ipi_spec_dst.s_addr = 0;
}
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
int ttl = ip_hdr(skb)->ttl;
put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
if (IPCB(skb)->opt.optlen == 0)
return;
put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
ip_hdr(skb) + 1);
}
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options * opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
if (ip_options_echo(opt, skb)) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
ip_options_undo(opt);
put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
char *secdata;
u32 seclen, secid;
int err;
err = security_socket_getpeersec_dgram(NULL, skb, &secid);
if (err)
return;
err = security_secid_to_secctx(secid, &secdata, &seclen);
if (err)
return;
put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
security_release_secctx(secdata, seclen);
}
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
__be16 *ports = (__be16 *)skb_transport_header(skb);
if (skb_transport_offset(skb) + 4 > skb->len)
return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned flags = inet->cmsg_flags;
/* Ordered by supposed usage frequency */
if (flags & 1)
ip_cmsg_recv_pktinfo(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_ttl(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_tos(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_opts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_retopts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_security(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_dstaddr(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv);
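/*
 * Illustrative userspace sketch, not part of the original source: the
 * receive side that ip_cmsg_recv() serves. Enabling IP_PKTINFO sets
 * the matching bit in inet->cmsg_flags, and recvmsg() then carries an
 * IP_PKTINFO control message built by ip_cmsg_recv_pktinfo() above.
 * fd, msg (with msg_control set) and handle_ifindex() are assumed.
 *
 *	int on = 1;
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == IPPROTO_IP &&
 *		    cm->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi;
 *
 *			pi = (struct in_pktinfo *)CMSG_DATA(cm);
 *			handle_ifindex(pi->ipi_ifindex);
 *		}
 */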
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
int err;
struct cmsghdr *cmsg;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
case IP_RETOPTS:
err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
err < 40 ? err : 40);
if (err)
return err;
break;
case IP_PKTINFO:
{
struct in_pktinfo *info;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
default:
return -EINVAL;
}
}
return 0;
}
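/*
 * Illustrative userspace sketch, not part of the original source: the
 * sending side that ip_cmsg_send() parses. An IP_PKTINFO control
 * message on sendmsg() ends up in ipc->oif and ipc->addr above; msg,
 * ifindex and spec_dst are assumed to be set up by the caller.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *	struct cmsghdr *cm;
 *	struct in_pktinfo *pi;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = IPPROTO_IP;
 *	cm->cmsg_type = IP_PKTINFO;
 *	cm->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
 *	pi = (struct in_pktinfo *)CMSG_DATA(cm);
 *	pi->ipi_ifindex = ifindex;
 *	pi->ipi_spec_dst = spec_dst;
 *	sendmsg(fd, &msg, 0);
 */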
/* Special input handler for packets caught by the router alert option.
They are selected only by the protocol field and are then processed
like local ones, but only if someone wants them! Otherwise a router
not running rsvpd will kill RSVP.
What user level will do with them is a user level problem.
I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
but the receiver should be clever enough e.g. to forward mtrace requests
sent to a multicast group so that they reach the destination's
designated router.
*/
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
sock_put(ra->saved_sk);
kfree(ra);
}
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
struct ip_ra_chain *ra, *new_ra;
struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
for (rap = &ip_ra_chain;
(ra = rcu_dereference_protected(*rap,
lockdep_is_held(&ip_ra_lock))) != NULL;
rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
}
/* don't let ip_call_ra_chain() use sk again */
ra->sk = NULL;
rcu_assign_pointer(*rap, ra->next);
spin_unlock_bh(&ip_ra_lock);
if (ra->destructor)
ra->destructor(sk);
/*
* Delay sock_put(sk) and kfree(ra) until after one RCU grace
* period. This guarantees that ip_call_ra_chain() doesn't need
* to mess with socket refcounts.
*/
ra->saved_sk = sk;
call_rcu(&ra->rcu, ip_ra_destroy_rcu);
return 0;
}
}
if (new_ra == NULL) {
spin_unlock_bh(&ip_ra_lock);
return -ENOBUFS;
}
new_ra->sk = sk;
new_ra->destructor = destructor;
new_ra->next = ra;
rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
spin_unlock_bh(&ip_ra_lock);
return 0;
}
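/*
 * Illustrative sketch, not part of the original source:
 * ip_ra_control() is normally reached through
 * setsockopt(IP_ROUTER_ALERT) on a raw socket (see the
 * IP_ROUTER_ALERT case in do_ip_setsockopt() below), e.g. from an
 * RSVP daemon:
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 */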
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct sock_exterr_skb *serr;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
serr->ee.ee_type = icmp_hdr(skb)->type;
serr->ee.ee_code = icmp_hdr(skb)->code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
if (skb_pull(skb, payload - skb->data) != NULL) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
}
kfree_skb(skb);
}
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
struct iphdr *iph;
struct sk_buff *skb;
if (!inet->recverr)
return;
skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
if (!skb)
return;
skb_put(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->daddr = daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_type = 0;
serr->ee.ee_code = 0;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
serr->port = port;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
/*
* Handle MSG_ERRQUEUE
*/
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2;
struct sockaddr_in *sin;
struct {
struct sock_extended_err ee;
struct sockaddr_in offender;
} errhdr;
int err;
int copied;
err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free_skb;
sock_recv_timestamp(msg, sk, skb);
serr = SKB_EXT_ERR(skb);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
sin->sin_port = serr->port;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
sin->sin_family = AF_UNSPEC;
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
/* Now we could try to dump the offending packet's options */
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
skb2 = skb_peek(&sk->sk_error_queue);
if (skb2 != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
out:
return err;
}
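/*
 * Illustrative userspace sketch, not part of the original source:
 * draining the error queue that ip_recv_error() serves. IP_RECVERR
 * must be enabled first; the sock_extended_err then arrives as an
 * IP_RECVERR control message. fd, msg (with msg_control set) and
 * handle_error() are assumed.
 *
 *	struct sock_extended_err *ee;
 *	struct cmsghdr *cm;
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == IPPROTO_IP &&
 *		    cm->cmsg_type == IP_RECVERR) {
 *			ee = (struct sock_extended_err *)CMSG_DATA(cm);
 *			handle_error(ee->ee_errno, ee->ee_origin);
 *		}
 */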
/*
* Socket option code for IP. This is the end of the line after any
* TCP, UDP, etc. options on an IP socket.
*/
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
(1<<IP_RETOPTS) | (1<<IP_TOS) |
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
(1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
optname == IP_RECVORIGDSTADDR) {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else if (optlen >= sizeof(char)) {
unsigned char ucval;
if (get_user(ucval, (unsigned char __user *) optval))
return -EFAULT;
val = (int) ucval;
}
}
/* If optlen==0, it is equivalent to val == 0 */
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
struct ip_options *opt = NULL;
if (optlen > 40)
goto e_inval;
err = ip_options_get_from_user(sock_net(sk), &opt,
optval, optlen);
if (err)
break;
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE)) &&
inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
if (inet->opt)
icsk->icsk_ext_hdr_len -= inet->opt->optlen;
if (opt)
icsk->icsk_ext_hdr_len += opt->optlen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
}
#endif
}
opt = xchg(&inet->opt, opt);
kfree(opt);
break;
}
case IP_PKTINFO:
if (val)
inet->cmsg_flags |= IP_CMSG_PKTINFO;
else
inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
break;
case IP_RECVTTL:
if (val)
inet->cmsg_flags |= IP_CMSG_TTL;
else
inet->cmsg_flags &= ~IP_CMSG_TTL;
break;
case IP_RECVTOS:
if (val)
inet->cmsg_flags |= IP_CMSG_TOS;
else
inet->cmsg_flags &= ~IP_CMSG_TOS;
break;
case IP_RECVOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RECVOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
break;
case IP_RETOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RETOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_PASSSEC:
if (val)
inet->cmsg_flags |= IP_CMSG_PASSSEC;
else
inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
break;
case IP_RECVORIGDSTADDR:
if (val)
inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
else
inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
val |= inet->tos & 3;
}
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
case IP_TTL:
if (optlen < 1)
goto e_inval;
if (val != -1 && (val < 0 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
case IP_HDRINCL:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->hdrincl = val ? 1 : 0;
break;
case IP_NODEFRAG:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->nodefrag = val ? 1 : 0;
break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
inet->pmtudisc = val;
break;
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen < 1)
goto e_inval;
if (val == -1)
val = 1;
if (val < 0 || val > 255)
goto e_inval;
inet->mc_ttl = val;
break;
case IP_MULTICAST_LOOP:
if (optlen < 1)
goto e_inval;
inet->mc_loop = !!val;
break;
case IP_MULTICAST_IF:
{
struct ip_mreqn mreq;
struct net_device *dev = NULL;
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check the arguments are allowable
*/
if (optlen < sizeof(struct in_addr))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (optlen >= sizeof(struct in_addr) &&
copy_from_user(&mreq.imr_address, optval,
sizeof(struct in_addr)))
break;
}
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
inet->mc_index = 0;
inet->mc_addr = 0;
err = 0;
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
if (dev)
mreq.imr_ifindex = dev->ifindex;
} else
dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
inet->mc_addr = mreq.imr_address.s_addr;
err = 0;
break;
}
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
{
struct ip_mreqn mreq;
err = -EPROTO;
if (inet_sk(sk)->is_icsk)
break;
if (optlen < sizeof(struct ip_mreq))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
break;
}
if (optname == IP_ADD_MEMBERSHIP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case IP_MSFILTER:
{
struct ip_msfilter *msf;
if (optlen < IP_MSFILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
msf = kmalloc(optlen, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(msf, optval, optlen)) {
kfree(msf);
break;
}
/* numsrc >= (1G-4) overflow in 32 bits */
if (msf->imsf_numsrc >= 0x3ffffffcU ||
msf->imsf_numsrc > sysctl_igmp_max_msf) {
kfree(msf);
err = -ENOBUFS;
break;
}
if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
kfree(msf);
err = -EINVAL;
break;
}
err = ip_mc_msfilter(sk, msf, 0);
kfree(msf);
break;
}
case IP_BLOCK_SOURCE:
case IP_UNBLOCK_SOURCE:
case IP_ADD_SOURCE_MEMBERSHIP:
case IP_DROP_SOURCE_MEMBERSHIP:
{
struct ip_mreq_source mreqs;
int omode, add;
if (optlen != sizeof(struct ip_mreq_source))
goto e_inval;
if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
err = -EFAULT;
break;
}
if (optname == IP_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == IP_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
struct ip_mreqn mreq;
mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
mreq.imr_address.s_addr = mreqs.imr_interface;
mreq.imr_ifindex = 0;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
add = 1;
} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs, 0);
break;
}
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct group_req greq;
struct sockaddr_in *psin;
struct ip_mreqn mreq;
if (optlen < sizeof(struct group_req))
goto e_inval;
err = -EFAULT;
if (copy_from_user(&greq, optval, sizeof(greq)))
break;
psin = (struct sockaddr_in *)&greq.gr_group;
if (psin->sin_family != AF_INET)
goto e_inval;
memset(&mreq, 0, sizeof(mreq));
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_ifindex = greq.gr_interface;
if (optname == MCAST_JOIN_GROUP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
struct ip_mreq_source mreqs;
struct sockaddr_in *psin;
int omode, add;
if (optlen != sizeof(struct group_source_req))
goto e_inval;
if (copy_from_user(&greqs, optval, sizeof(greqs))) {
err = -EFAULT;
break;
}
if (greqs.gsr_group.ss_family != AF_INET ||
greqs.gsr_source.ss_family != AF_INET) {
err = -EADDRNOTAVAIL;
break;
}
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreqs.imr_multiaddr = psin->sin_addr.s_addr;
psin = (struct sockaddr_in *)&greqs.gsr_source;
mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
mreqs.imr_interface = 0; /* use index for mc_source */
if (optname == MCAST_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == MCAST_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
struct ip_mreqn mreq;
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_address.s_addr = 0;
mreq.imr_ifindex = greqs.gsr_interface;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
greqs.gsr_interface = mreq.imr_ifindex;
omode = MCAST_INCLUDE;
add = 1;
} else /* MCAST_LEAVE_SOURCE_GROUP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs,
greqs.gsr_interface);
break;
}
case MCAST_MSFILTER:
{
struct sockaddr_in *psin;
struct ip_msfilter *msf = NULL;
struct group_filter *gsf = NULL;
int msize, i, ifindex;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(gsf, optval, optlen))
goto mc_msf_out;
/* numsrc >= (4G-140)/128 overflow in 32 bits */
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
err = -EINVAL;
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
msf = kmalloc(msize, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
ifindex = gsf->gf_interface;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET) {
err = -EADDRNOTAVAIL;
goto mc_msf_out;
}
msf->imsf_multiaddr = psin->sin_addr.s_addr;
msf->imsf_interface = 0;
msf->imsf_fmode = gsf->gf_fmode;
msf->imsf_numsrc = gsf->gf_numsrc;
err = -EADDRNOTAVAIL;
for (i = 0; i < gsf->gf_numsrc; ++i) {
psin = (struct sockaddr_in *)&gsf->gf_slist[i];
if (psin->sin_family != AF_INET)
goto mc_msf_out;
msf->imsf_slist[i] = psin->sin_addr.s_addr;
}
kfree(gsf);
gsf = NULL;
err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
kfree(msf);
kfree(gsf);
break;
}
case IP_MULTICAST_ALL:
if (optlen < 1)
goto e_inval;
if (val != 0 && val != 1)
goto e_inval;
inet->mc_all = val;
break;
case IP_ROUTER_ALERT:
err = ip_ra_control(sk, val ? 1 : 0, NULL);
break;
case IP_FREEBIND:
if (optlen < 1)
goto e_inval;
inet->freebind = !!val;
break;
case IP_IPSEC_POLICY:
case IP_XFRM_POLICY:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
case IP_TRANSPARENT:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
if (optlen < 1)
goto e_inval;
inet->transparent = !!val;
break;
case IP_MINTTL:
if (optlen < 1)
goto e_inval;
if (val < 0 || val > 255)
goto e_inval;
inet->min_ttl = val;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
e_inval:
release_sock(sk);
return -EINVAL;
}
/**
* ip_queue_rcv_skb - Queue an skb into sock receive queue
* @sk: socket
* @skb: buffer
*
* Queues an skb into the socket receive queue. If the IP_CMSG_PKTINFO
* option is not set, we drop the skb dst entry now, while the dst
* cache line is hot.
*/
int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
skb_dst_drop(skb);
return sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(ip_queue_rcv_skb);
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(ip_setsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
return compat_mc_setsockopt(sk, level, optname, optval, optlen,
ip_setsockopt);
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = compat_nf_setsockopt(sk, PF_INET, optname,
optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
* Get the options. Note for future reference: the GET of IP options
* gets the _received_ ones; the SET sets the _sent_ ones.
*/
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val;
int len;
if (level != SOL_IP)
return -EOPNOTSUPP;
if (ip_mroute_opt(optname))
return ip_mroute_getsockopt(sk, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
unsigned char optbuf[sizeof(struct ip_options)+40];
struct ip_options * opt = (struct ip_options *)optbuf;
opt->optlen = 0;
if (inet->opt)
memcpy(optbuf, inet->opt,
sizeof(struct ip_options)+
inet->opt->optlen);
release_sock(sk);
if (opt->optlen == 0)
return put_user(0, optlen);
ip_options_undo(opt);
len = min_t(unsigned int, len, opt->optlen);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, opt->__data, len))
return -EFAULT;
return 0;
}
case IP_PKTINFO:
val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
break;
case IP_RECVTTL:
val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
break;
case IP_RECVTOS:
val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
break;
case IP_RECVOPTS:
val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
break;
case IP_RETOPTS:
val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
break;
case IP_PASSSEC:
val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
break;
case IP_RECVORIGDSTADDR:
val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
break;
case IP_TOS:
val = inet->tos;
break;
case IP_TTL:
val = (inet->uc_ttl == -1 ?
sysctl_ip_default_ttl :
inet->uc_ttl);
break;
case IP_HDRINCL:
val = inet->hdrincl;
break;
case IP_NODEFRAG:
val = inet->nodefrag;
break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
case IP_MTU:
{
struct dst_entry *dst;
val = 0;
dst = sk_dst_get(sk);
if (dst) {
val = dst_mtu(dst);
dst_release(dst);
}
if (!val) {
release_sock(sk);
return -ENOTCONN;
}
break;
}
case IP_RECVERR:
val = inet->recverr;
break;
case IP_MULTICAST_TTL:
val = inet->mc_ttl;
break;
case IP_MULTICAST_LOOP:
val = inet->mc_loop;
break;
case IP_MULTICAST_IF:
{
struct in_addr addr;
len = min_t(unsigned int, len, sizeof(struct in_addr));
addr.s_addr = inet->mc_addr;
release_sock(sk);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &addr, len))
return -EFAULT;
return 0;
}
case IP_MSFILTER:
{
struct ip_msfilter msf;
int err;
if (len < IP_MSFILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_msfget(sk, &msf,
(struct ip_msfilter __user *)optval, optlen);
release_sock(sk);
return err;
}
case MCAST_MSFILTER:
{
struct group_filter gsf;
int err;
if (len < GROUP_FILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_gsfget(sk, &gsf,
(struct group_filter __user *)optval,
optlen);
release_sock(sk);
return err;
}
case IP_MULTICAST_ALL:
val = inet->mc_all;
break;
case IP_PKTOPTIONS:
{
struct msghdr msg;
release_sock(sk);
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
msg.msg_controllen = len;
msg.msg_flags = 0;
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
info.ipi_addr.s_addr = inet->inet_rcv_saddr;
info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
if (inet->cmsg_flags & IP_CMSG_TTL) {
int hlim = inet->mc_ttl;
put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
}
len -= msg.msg_controllen;
return put_user(len, optlen);
}
case IP_FREEBIND:
val = inet->freebind;
break;
case IP_TRANSPARENT:
val = inet->transparent;
break;
case IP_MINTTL:
val = inet->min_ttl;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
unsigned char ucval = (unsigned char)val;
len = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &ucval, 1))
return -EFAULT;
} else {
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
}
return 0;
}
int ip_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen)
{
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = nf_getsockopt(sk, PF_INET, optname, optval,
&len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(ip_getsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
if (optname == MCAST_MSFILTER)
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ip_getsockopt);
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except the default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The IP to API glue.
*
* Authors: see ip.c
*
* Fixes:
* Many : Split from ip.c , see ip.c for history.
* Martin Mares : TOS setting fixed.
* Alan Cox : Fixed a couple of oopses in Martin's
* TOS tweaks.
* Mike McLagan : Routing by source
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/transp_v6.h>
#endif
#include <linux/errqueue.h>
#include <asm/uaccess.h>
#define IP_CMSG_PKTINFO 1
#define IP_CMSG_TTL 2
#define IP_CMSG_TOS 4
#define IP_CMSG_RECVOPTS 8
#define IP_CMSG_RETOPTS 16
#define IP_CMSG_PASSSEC 32
#define IP_CMSG_ORIGDSTADDR 64
/*
* SOL_IP control messages.
*/
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct in_pktinfo info;
struct rtable *rt = skb_rtable(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
if (rt) {
info.ipi_ifindex = rt->rt_iif;
info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
} else {
info.ipi_ifindex = 0;
info.ipi_spec_dst.s_addr = 0;
}
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
int ttl = ip_hdr(skb)->ttl;
put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
if (IPCB(skb)->opt.optlen == 0)
return;
put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
ip_hdr(skb) + 1);
}
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options * opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
if (ip_options_echo(opt, skb)) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
ip_options_undo(opt);
put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
char *secdata;
u32 seclen, secid;
int err;
err = security_socket_getpeersec_dgram(NULL, skb, &secid);
if (err)
return;
err = security_secid_to_secctx(secid, &secdata, &seclen);
if (err)
return;
put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
security_release_secctx(secdata, seclen);
}
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
__be16 *ports = (__be16 *)skb_transport_header(skb);
if (skb_transport_offset(skb) + 4 > skb->len)
return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned flags = inet->cmsg_flags;
/* Ordered by supposed usage frequency */
if (flags & 1)
ip_cmsg_recv_pktinfo(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_ttl(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_tos(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_opts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_retopts(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_security(msg, skb);
if ((flags >>= 1) == 0)
return;
if (flags & 1)
ip_cmsg_recv_dstaddr(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv);
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
int err;
struct cmsghdr *cmsg;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
case IP_RETOPTS:
err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
err < 40 ? err : 40);
if (err)
return err;
break;
case IP_PKTINFO:
{
struct in_pktinfo *info;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
default:
return -EINVAL;
}
}
return 0;
}
/* Special input handler for packets caught by the router alert option.
They are selected only by the protocol field and are then processed
like local ones, but only if someone wants them! Otherwise a router
not running rsvpd will kill RSVP.
What user level will do with them is a user level problem.
I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
but the receiver should be clever enough e.g. to forward mtrace requests
sent to a multicast group so that they reach the destination's
designated router.
*/
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
sock_put(ra->saved_sk);
kfree(ra);
}
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
struct ip_ra_chain *ra, *new_ra;
struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
for (rap = &ip_ra_chain;
(ra = rcu_dereference_protected(*rap,
lockdep_is_held(&ip_ra_lock))) != NULL;
rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
}
/* don't let ip_call_ra_chain() use sk again */
ra->sk = NULL;
rcu_assign_pointer(*rap, ra->next);
spin_unlock_bh(&ip_ra_lock);
if (ra->destructor)
ra->destructor(sk);
/*
* Delay sock_put(sk) and kfree(ra) until after one RCU grace
* period. This guarantees that ip_call_ra_chain() doesn't need
* to mess with socket refcounts.
*/
ra->saved_sk = sk;
call_rcu(&ra->rcu, ip_ra_destroy_rcu);
return 0;
}
}
if (new_ra == NULL) {
spin_unlock_bh(&ip_ra_lock);
return -ENOBUFS;
}
new_ra->sk = sk;
new_ra->destructor = destructor;
new_ra->next = ra;
rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
spin_unlock_bh(&ip_ra_lock);
return 0;
}
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
struct sock_exterr_skb *serr;
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
serr->ee.ee_type = icmp_hdr(skb)->type;
serr->ee.ee_code = icmp_hdr(skb)->code;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
skb_network_header(skb);
serr->port = port;
if (skb_pull(skb, payload - skb->data) != NULL) {
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb) == 0)
return;
}
kfree_skb(skb);
}
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
struct iphdr *iph;
struct sk_buff *skb;
if (!inet->recverr)
return;
skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
if (!skb)
return;
skb_put(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->daddr = daddr;
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
serr->ee.ee_type = 0;
serr->ee.ee_code = 0;
serr->ee.ee_pad = 0;
serr->ee.ee_info = info;
serr->ee.ee_data = 0;
serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
serr->port = port;
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
skb_reset_transport_header(skb);
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
/*
* Handle MSG_ERRQUEUE
*/
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
struct sock_exterr_skb *serr;
struct sk_buff *skb, *skb2;
struct sockaddr_in *sin;
struct {
struct sock_extended_err ee;
struct sockaddr_in offender;
} errhdr;
int err;
int copied;
err = -EAGAIN;
skb = skb_dequeue(&sk->sk_error_queue);
if (skb == NULL)
goto out;
copied = skb->len;
if (copied > len) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto out_free_skb;
sock_recv_timestamp(msg, sk, skb);
serr = SKB_EXT_ERR(skb);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
serr->addr_offset);
sin->sin_port = serr->port;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
sin->sin_family = AF_UNSPEC;
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
struct inet_sock *inet = inet_sk(sk);
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
/* Now we could try to dump the offending packet's options */
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
/* Reset and regenerate socket error */
spin_lock_bh(&sk->sk_error_queue.lock);
sk->sk_err = 0;
skb2 = skb_peek(&sk->sk_error_queue);
if (skb2 != NULL) {
sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
spin_unlock_bh(&sk->sk_error_queue.lock);
sk->sk_error_report(sk);
} else
spin_unlock_bh(&sk->sk_error_queue.lock);
out_free_skb:
kfree_skb(skb);
out:
return err;
}
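/*
 * For illustration, a hedged userspace sketch of draining the queue
 * that ip_recv_error() serves. Names and buffer sizes are assumptions
 * for the sketch; the cmsg payload matches errhdr above: a struct
 * sock_extended_err followed by the offender's address.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static int example_drain_errqueue(int fd)
{
        struct sockaddr_in addr;        /* original destination address */
        char buf[512], ctrl[512];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = {
                .msg_name = &addr, .msg_namelen = sizeof(addr),
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *cm;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return -1;              /* EAGAIN: the queue is empty */
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                struct sock_extended_err *ee;

                if (cm->cmsg_level != SOL_IP || cm->cmsg_type != IP_RECVERR)
                        continue;
                ee = (struct sock_extended_err *)CMSG_DATA(cm);
                printf("errno=%u origin=%u type=%u code=%u\n",
                       ee->ee_errno, ee->ee_origin, ee->ee_type, ee->ee_code);
        }
        return 0;
}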
static void opt_kfree_rcu(struct rcu_head *head)
{
kfree(container_of(head, struct ip_options_rcu, rcu));
}
/*
* Socket option code for IP. This is the end of the line after any
* TCP, UDP, etc. options on an IP socket.
*/
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val = 0, err;
if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
(1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
(1<<IP_RETOPTS) | (1<<IP_TOS) |
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
(1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
(1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
optname == IP_RECVORIGDSTADDR) {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else if (optlen >= sizeof(char)) {
unsigned char ucval;
if (get_user(ucval, (unsigned char __user *) optval))
return -EFAULT;
val = (int) ucval;
}
}
/* If optlen==0, it is equivalent to val == 0 */
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
struct ip_options_rcu *old, *opt = NULL;
if (optlen > 40)
goto e_inval;
err = ip_options_get_from_user(sock_net(sk), &opt,
optval, optlen);
if (err)
break;
old = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE)) &&
inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
if (old)
icsk->icsk_ext_hdr_len -= old->opt.optlen;
if (opt)
icsk->icsk_ext_hdr_len += opt->opt.optlen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
}
#endif
}
rcu_assign_pointer(inet->inet_opt, opt);
if (old)
call_rcu(&old->rcu, opt_kfree_rcu);
break;
}
case IP_PKTINFO:
if (val)
inet->cmsg_flags |= IP_CMSG_PKTINFO;
else
inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
break;
case IP_RECVTTL:
if (val)
inet->cmsg_flags |= IP_CMSG_TTL;
else
inet->cmsg_flags &= ~IP_CMSG_TTL;
break;
case IP_RECVTOS:
if (val)
inet->cmsg_flags |= IP_CMSG_TOS;
else
inet->cmsg_flags &= ~IP_CMSG_TOS;
break;
case IP_RECVOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RECVOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
break;
case IP_RETOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RETOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_PASSSEC:
if (val)
inet->cmsg_flags |= IP_CMSG_PASSSEC;
else
inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
break;
case IP_RECVORIGDSTADDR:
if (val)
inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
else
inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~3;
val |= inet->tos & 3;
}
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
case IP_TTL:
if (optlen < 1)
goto e_inval;
if (val != -1 && (val < 0 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
case IP_HDRINCL:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->hdrincl = val ? 1 : 0;
break;
case IP_NODEFRAG:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->nodefrag = val ? 1 : 0;
break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
goto e_inval;
inet->pmtudisc = val;
break;
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen < 1)
goto e_inval;
if (val == -1)
val = 1;
if (val < 0 || val > 255)
goto e_inval;
inet->mc_ttl = val;
break;
case IP_MULTICAST_LOOP:
if (optlen < 1)
goto e_inval;
inet->mc_loop = !!val;
break;
case IP_MULTICAST_IF:
{
struct ip_mreqn mreq;
struct net_device *dev = NULL;
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check that the arguments are allowable.
*/
if (optlen < sizeof(struct in_addr))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (optlen >= sizeof(struct in_addr) &&
copy_from_user(&mreq.imr_address, optval,
sizeof(struct in_addr)))
break;
}
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
inet->mc_index = 0;
inet->mc_addr = 0;
err = 0;
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
if (dev)
mreq.imr_ifindex = dev->ifindex;
} else
dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
inet->mc_addr = mreq.imr_address.s_addr;
err = 0;
break;
}
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
{
struct ip_mreqn mreq;
err = -EPROTO;
if (inet_sk(sk)->is_icsk)
break;
if (optlen < sizeof(struct ip_mreq))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
break;
}
if (optname == IP_ADD_MEMBERSHIP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case IP_MSFILTER:
{
struct ip_msfilter *msf;
if (optlen < IP_MSFILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
msf = kmalloc(optlen, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(msf, optval, optlen)) {
kfree(msf);
break;
}
/* numsrc >= (1G-4): IP_MSFILTER_SIZE() would overflow 32 bits */
if (msf->imsf_numsrc >= 0x3ffffffcU ||
msf->imsf_numsrc > sysctl_igmp_max_msf) {
kfree(msf);
err = -ENOBUFS;
break;
}
if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
kfree(msf);
err = -EINVAL;
break;
}
err = ip_mc_msfilter(sk, msf, 0);
kfree(msf);
break;
}
case IP_BLOCK_SOURCE:
case IP_UNBLOCK_SOURCE:
case IP_ADD_SOURCE_MEMBERSHIP:
case IP_DROP_SOURCE_MEMBERSHIP:
{
struct ip_mreq_source mreqs;
int omode, add;
if (optlen != sizeof(struct ip_mreq_source))
goto e_inval;
if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
err = -EFAULT;
break;
}
if (optname == IP_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == IP_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
struct ip_mreqn mreq;
mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
mreq.imr_address.s_addr = mreqs.imr_interface;
mreq.imr_ifindex = 0;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
add = 1;
} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs, 0);
break;
}
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct group_req greq;
struct sockaddr_in *psin;
struct ip_mreqn mreq;
if (optlen < sizeof(struct group_req))
goto e_inval;
err = -EFAULT;
if (copy_from_user(&greq, optval, sizeof(greq)))
break;
psin = (struct sockaddr_in *)&greq.gr_group;
if (psin->sin_family != AF_INET)
goto e_inval;
memset(&mreq, 0, sizeof(mreq));
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_ifindex = greq.gr_interface;
if (optname == MCAST_JOIN_GROUP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
struct ip_mreq_source mreqs;
struct sockaddr_in *psin;
int omode, add;
if (optlen != sizeof(struct group_source_req))
goto e_inval;
if (copy_from_user(&greqs, optval, sizeof(greqs))) {
err = -EFAULT;
break;
}
if (greqs.gsr_group.ss_family != AF_INET ||
greqs.gsr_source.ss_family != AF_INET) {
err = -EADDRNOTAVAIL;
break;
}
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreqs.imr_multiaddr = psin->sin_addr.s_addr;
psin = (struct sockaddr_in *)&greqs.gsr_source;
mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
mreqs.imr_interface = 0; /* use index for mc_source */
if (optname == MCAST_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == MCAST_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
struct ip_mreqn mreq;
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_address.s_addr = 0;
mreq.imr_ifindex = greqs.gsr_interface;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
greqs.gsr_interface = mreq.imr_ifindex;
omode = MCAST_INCLUDE;
add = 1;
} else /* MCAST_LEAVE_SOURCE_GROUP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs,
greqs.gsr_interface);
break;
}
case MCAST_MSFILTER:
{
struct sockaddr_in *psin;
struct ip_msfilter *msf = NULL;
struct group_filter *gsf = NULL;
int msize, i, ifindex;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(gsf, optval, optlen))
goto mc_msf_out;
/* numsrc >= (4G-140)/128: GROUP_FILTER_SIZE() would overflow 32 bits */
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
err = -EINVAL;
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
msf = kmalloc(msize, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
ifindex = gsf->gf_interface;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET) {
err = -EADDRNOTAVAIL;
goto mc_msf_out;
}
msf->imsf_multiaddr = psin->sin_addr.s_addr;
msf->imsf_interface = 0;
msf->imsf_fmode = gsf->gf_fmode;
msf->imsf_numsrc = gsf->gf_numsrc;
err = -EADDRNOTAVAIL;
for (i = 0; i < gsf->gf_numsrc; ++i) {
psin = (struct sockaddr_in *)&gsf->gf_slist[i];
if (psin->sin_family != AF_INET)
goto mc_msf_out;
msf->imsf_slist[i] = psin->sin_addr.s_addr;
}
kfree(gsf);
gsf = NULL;
err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
kfree(msf);
kfree(gsf);
break;
}
case IP_MULTICAST_ALL:
if (optlen < 1)
goto e_inval;
if (val != 0 && val != 1)
goto e_inval;
inet->mc_all = val;
break;
case IP_ROUTER_ALERT:
err = ip_ra_control(sk, val ? 1 : 0, NULL);
break;
case IP_FREEBIND:
if (optlen < 1)
goto e_inval;
inet->freebind = !!val;
break;
case IP_IPSEC_POLICY:
case IP_XFRM_POLICY:
err = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
case IP_TRANSPARENT:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
if (optlen < 1)
goto e_inval;
inet->transparent = !!val;
break;
case IP_MINTTL:
if (optlen < 1)
goto e_inval;
if (val < 0 || val > 255)
goto e_inval;
inet->min_ttl = val;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
e_inval:
release_sock(sk);
return -EINVAL;
}
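/*
 * For illustration, a hedged userspace sketch of two conventions the
 * switch above depends on: IP_ADD_MEMBERSHIP accepts either the old
 * struct ip_mreq or the newer struct ip_mreqn (distinguished by optlen),
 * and byte-sized options such as IP_MULTICAST_TTL may be passed with
 * optlen == 1 (the get_user(char) path at the top of the function).
 * The group address and function name are assumptions for the sketch.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int example_join_and_scope(int fd)
{
        struct ip_mreqn mr;
        unsigned char ttl = 8;

        memset(&mr, 0, sizeof(mr));
        mr.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
        mr.imr_ifindex = 0;     /* 0: let routing choose the interface */
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr)))
                return -1;
        /* A single byte is accepted here, not just a full int. */
        return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, 1);
}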
/**
* ip_queue_rcv_skb - Queue an skb into sock receive queue
* @sk: socket
* @skb: buffer
*
* Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
* is not set, we drop skb dst entry now, while dst cache line is hot.
*/
int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
skb_dst_drop(skb);
return sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(ip_queue_rcv_skb);
int ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(ip_setsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
int err;
if (level != SOL_IP)
return -ENOPROTOOPT;
if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
return compat_mc_setsockopt(sk, level, optname, optval, optlen,
ip_setsockopt);
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
optname != IP_IPSEC_POLICY &&
optname != IP_XFRM_POLICY &&
!ip_mroute_opt(optname)) {
lock_sock(sk);
err = compat_nf_setsockopt(sk, PF_INET, optname,
optval, optlen);
release_sock(sk);
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
* Get the options. Note for future reference: the GET of IP options gets
* the _received_ ones, while SET sets the _sent_ ones.
*/
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct inet_sock *inet = inet_sk(sk);
int val;
int len;
if (level != SOL_IP)
return -EOPNOTSUPP;
if (ip_mroute_opt(optname))
return ip_mroute_getsockopt(sk, optname, optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
unsigned char optbuf[sizeof(struct ip_options)+40];
struct ip_options *opt = (struct ip_options *)optbuf;
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
opt->optlen = 0;
if (inet_opt)
memcpy(optbuf, &inet_opt->opt,
sizeof(struct ip_options) +
inet_opt->opt.optlen);
release_sock(sk);
if (opt->optlen == 0)
return put_user(0, optlen);
ip_options_undo(opt);
len = min_t(unsigned int, len, opt->optlen);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, opt->__data, len))
return -EFAULT;
return 0;
}
case IP_PKTINFO:
val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
break;
case IP_RECVTTL:
val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
break;
case IP_RECVTOS:
val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
break;
case IP_RECVOPTS:
val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
break;
case IP_RETOPTS:
val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
break;
case IP_PASSSEC:
val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
break;
case IP_RECVORIGDSTADDR:
val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
break;
case IP_TOS:
val = inet->tos;
break;
case IP_TTL:
val = (inet->uc_ttl == -1 ?
sysctl_ip_default_ttl :
inet->uc_ttl);
break;
case IP_HDRINCL:
val = inet->hdrincl;
break;
case IP_NODEFRAG:
val = inet->nodefrag;
break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
case IP_MTU:
{
struct dst_entry *dst;
val = 0;
dst = sk_dst_get(sk);
if (dst) {
val = dst_mtu(dst);
dst_release(dst);
}
if (!val) {
release_sock(sk);
return -ENOTCONN;
}
break;
}
case IP_RECVERR:
val = inet->recverr;
break;
case IP_MULTICAST_TTL:
val = inet->mc_ttl;
break;
case IP_MULTICAST_LOOP:
val = inet->mc_loop;
break;
case IP_MULTICAST_IF:
{
struct in_addr addr;
len = min_t(unsigned int, len, sizeof(struct in_addr));
addr.s_addr = inet->mc_addr;
release_sock(sk);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &addr, len))
return -EFAULT;
return 0;
}
case IP_MSFILTER:
{
struct ip_msfilter msf;
int err;
if (len < IP_MSFILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_msfget(sk, &msf,
(struct ip_msfilter __user *)optval, optlen);
release_sock(sk);
return err;
}
case MCAST_MSFILTER:
{
struct group_filter gsf;
int err;
if (len < GROUP_FILTER_SIZE(0)) {
release_sock(sk);
return -EINVAL;
}
if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
release_sock(sk);
return -EFAULT;
}
err = ip_mc_gsfget(sk, &gsf,
(struct group_filter __user *)optval,
optlen);
release_sock(sk);
return err;
}
case IP_MULTICAST_ALL:
val = inet->mc_all;
break;
case IP_PKTOPTIONS:
{
struct msghdr msg;
release_sock(sk);
if (sk->sk_type != SOCK_STREAM)
return -ENOPROTOOPT;
msg.msg_control = optval;
msg.msg_controllen = len;
msg.msg_flags = 0;
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
info.ipi_addr.s_addr = inet->inet_rcv_saddr;
info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
if (inet->cmsg_flags & IP_CMSG_TTL) {
int hlim = inet->mc_ttl;
put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
}
len -= msg.msg_controllen;
return put_user(len, optlen);
}
case IP_FREEBIND:
val = inet->freebind;
break;
case IP_TRANSPARENT:
val = inet->transparent;
break;
case IP_MINTTL:
val = inet->min_ttl;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;
}
release_sock(sk);
if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
unsigned char ucval = (unsigned char)val;
len = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &ucval, 1))
return -EFAULT;
} else {
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
}
return 0;
}
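/*
 * For illustration, a hedged userspace sketch of the IP_MTU branch
 * above: the option only succeeds once the socket has a cached route
 * (e.g. after connect() on a UDP socket); otherwise it fails with
 * ENOTCONN. The function name is an assumption for the sketch.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int example_query_path_mtu(int fd)
{
        int mtu = 0;
        socklen_t len = sizeof(mtu);

        if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
                return -1;      /* typically ENOTCONN if unconnected */
        return mtu;
}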
int ip_getsockopt(struct sock *sk, int level,
int optname, char __user *optval, int __user *optlen)
{
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = nf_getsockopt(sk, PF_INET, optname, optval,
&len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(ip_getsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int err;
if (optname == MCAST_MSFILTER)
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ip_getsockopt);
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
!ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
release_sock(sk);
if (err >= 0)
err = put_user(len, optlen);
return err;
}
#endif
return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif
|
98,199,848,147,505 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* RAW - implementation of IP "raw" sockets.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Fixes:
* Alan Cox : verify_area() fixed up
* Alan Cox : ICMP error handling
* Alan Cox : EMSGSIZE if you send too big a packet
* Alan Cox : Now uses generic datagrams and shared
* skbuff library. No more peek crashes,
* no more backlogs
* Alan Cox : Checks sk->broadcast.
* Alan Cox : Uses skb_free_datagram/skb_copy_datagram
* Alan Cox : Raw passes ip options too
* Alan Cox : Setsocketopt added
* Alan Cox : Fixed error return for broadcasts
* Alan Cox : Removed wake_up calls
* Alan Cox : Use ttl/tos
* Alan Cox : Cleaned up old debugging
* Alan Cox : Use new kernel side addresses
* Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets.
* Alan Cox : BSD style RAW socket demultiplexing.
* Alan Cox : Beginnings of mrouted support.
* Alan Cox : Added IP_HDRINCL option.
* Alan Cox : Skip broadcast check if BSDism set.
* David S. Miller : New socket lookup architecture.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/aio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/mroute.h>
#include <linux/netdevice.h>
#include <linux/in_route.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/snmp.h>
#include <net/tcp_states.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
};
void raw_hash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
struct hlist_head *head;
head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
write_lock_bh(&h->lock);
sk_add_node(sk, head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_hash_sk);
void raw_unhash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
write_lock_bh(&h->lock);
if (sk_del_node_init(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_unhash_sk);
static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
unsigned short num, __be32 raddr, __be32 laddr, int dif)
{
struct hlist_node *node;
sk_for_each_from(sk, node) {
struct inet_sock *inet = inet_sk(sk);
if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
!(inet->inet_daddr && inet->inet_daddr != raddr) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found; /* gotcha */
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
{
int type;
if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
return 1;
type = icmp_hdr(skb)->type;
if (type < 32) {
__u32 data = raw_sk(sk)->filter.data;
return ((1 << type) & data) != 0;
}
/* Do not block unknown ICMP types */
return 0;
}
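/*
 * For illustration, a hedged userspace sketch of installing the filter
 * tested above. Per the return convention (1 == block), a set bit in
 * filter.data drops that ICMP type, so a ping-like tool clears only
 * the ICMP_ECHOREPLY bit. The function name is an assumption for the
 * sketch; the socket must be SOCK_RAW/IPPROTO_ICMP for this option
 * (see do_raw_setsockopt() below).
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>

static int example_pass_echo_replies_only(int fd)
{
        struct icmp_filter filt;

        filt.data = ~(1U << ICMP_ECHOREPLY);    /* block everything else */
        return setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt));
}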
/* IP input processing comes here for RAW socket delivery.
* Caller owns SKB, so we must make clones.
*
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but the entire IP header.
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
struct net *net;
read_lock(&raw_v4_hashinfo.lock);
head = &raw_v4_hashinfo.ht[hash];
if (hlist_empty(head))
goto out;
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
while (sk) {
delivered = 1;
if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone)
raw_rcv(sk, clone);
}
sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
}
out:
read_unlock(&raw_v4_hashinfo.lock);
return delivered;
}
int raw_local_deliver(struct sk_buff *skb, int protocol)
{
int hash;
struct sock *raw_sk;
hash = protocol & (RAW_HTABLE_SIZE - 1);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
/* If there may be a raw socket we must check; if not, we
* don't care.
*/
if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
raw_sk = NULL;
return raw_sk != NULL;
}
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
int err = 0;
int harderr = 0;
/* Report error on raw socket, if:
1. User requested ip_recverr.
2. Socket is connected (otherwise the error indication
is useless without ip_recverr and the error is hard).
*/
if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
return;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
err = EHOSTUNREACH;
if (code > NR_ICMP_UNREACH)
break;
err = icmp_err_convert[code].errno;
harderr = icmp_err_convert[code].fatal;
if (code == ICMP_FRAG_NEEDED) {
harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
err = EMSGSIZE;
}
}
if (inet->recverr) {
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 *payload = skb->data + (iph->ihl << 2);
if (inet->hdrincl)
payload = skb->data;
ip_icmp_error(sk, skb, err, 0, info, payload);
}
if (inet->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
int hash;
struct sock *raw_sk;
const struct iphdr *iph;
struct net *net;
hash = protocol & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v4_hashinfo.lock);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
if (raw_sk != NULL) {
iph = (const struct iphdr *)skb->data;
net = dev_net(skb->dev);
while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
iph->daddr, iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info);
raw_sk = sk_next(raw_sk);
iph = (const struct iphdr *)skb->data;
}
}
read_unlock(&raw_v4_hashinfo.lock);
}
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
nf_reset(skb);
skb_push(skb, skb->data - skb_network_header(skb));
raw_rcv_skb(sk, skb);
return 0;
}
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct iphdr *iph;
struct sk_buff *skb;
unsigned int iphlen;
int err;
struct rtable *rt = *rtp;
if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
skb_put(skb, length);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = -EFAULT;
if (memcpy_fromiovecend((void *)iph, from, 0, length))
goto error_free;
iphlen = iph->ihl * 4;
/*
* We don't want to modify the ip header, but we do need to
* be sure that it won't cause problems later along the network
* stack. Specifically, we want to make sure that iph->ihl is a
* sane value. If ihl points beyond the length of the buffer passed
* in, reject the frame as invalid.
*/
err = -EINVAL;
if (iphlen > length)
goto error_free;
if (iphlen >= sizeof(*iph)) {
if (!iph->saddr)
iph->saddr = rt->rt_src;
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
ip_select_ident(iph, &rt->dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_free:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
return err;
}
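/*
 * For illustration, a hedged userspace sketch of feeding this function:
 * with IP_HDRINCL the caller supplies the IP header itself, and the
 * code above rewrites tot_len and check, filling in saddr and id when
 * they are left zero. The destination address (TEST-NET-1) and function
 * name are assumptions for the sketch; CAP_NET_RAW is required.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

static int example_hdrincl_send(void)
{
        int on = 1, fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
        struct iphdr iph;
        struct sockaddr_in dst = { .sin_family = AF_INET };

        if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
                return -1;
        memset(&iph, 0, sizeof(iph));
        iph.version = 4;
        iph.ihl = 5;                    /* header only; must fit the buffer */
        iph.ttl = 64;
        iph.protocol = IPPROTO_UDP;
        iph.saddr = 0;                  /* 0: the kernel fills in rt_src */
        dst.sin_addr.s_addr = inet_addr("192.0.2.1");
        iph.daddr = dst.sin_addr.s_addr;
        /* tot_len, id and check are (re)written above before transmit. */
        return sendto(fd, &iph, sizeof(iph), 0,
                      (struct sockaddr *)&dst, sizeof(dst));
}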
static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
u8 __user *code = NULL;
int probed = 0;
unsigned int i;
if (!msg->msg_iov)
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
if (!iov)
continue;
switch (fl4->flowi4_proto) {
case IPPROTO_ICMP:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
break;
if (!type) {
type = iov->iov_base;
/* check if code field is readable or not. */
if (iov->iov_len > 1)
code = type + 1;
} else if (!code)
code = iov->iov_base;
if (type && code) {
if (get_user(fl4->fl4_icmp_type, type) ||
get_user(fl4->fl4_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
default:
probed = 1;
break;
}
if (probed)
break;
}
return 0;
}
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
static int complained;
if (!complained++)
printk(KERN_INFO "%s forgot to set AF_INET in "
"raw sendmsg. Fix it!\n",
current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
goto out;
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
if (!ipc.opt)
ipc.opt = inet->opt;
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so combining IP options with IP_HDRINCL is nonsense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->srr) {
if (!daddr)
goto done;
daddr = ipc.opt->faddr;
}
}
tos = RT_CONN_FLAGS(sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, msg->msg_iov, len,
&rt, msg->msg_flags);
else {
if (!ipc.addr)
ipc.addr = rt->rt_dst;
lock_sock(sk);
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
static void raw_close(struct sock *sk, long timeout)
{
/*
* Raw sockets may have direct kernel references. Kill them.
*/
ip_ra_control(sk, 0, NULL);
sk_common_release(sk);
}
static void raw_destroy(struct sock *sk)
{
lock_sock(sk);
ip_flush_pending_frames(sk);
release_sock(sk);
}
/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
}
/*
* This should be easy: if there is something there
* we return it; otherwise we block.
*/
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
if (flags & MSG_ERRQUEUE) {
err = ip_recv_error(sk, msg, len);
goto out;
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
static int raw_init(struct sock *sk)
{
struct raw_sock *rp = raw_sk(sk);
if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
memset(&rp->filter, 0, sizeof(rp->filter));
return 0;
}
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
if (optlen > sizeof(struct icmp_filter))
optlen = sizeof(struct icmp_filter);
if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
}
static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
{
int len, ret = -EFAULT;
if (get_user(len, optlen))
goto out;
ret = -EINVAL;
if (len < 0)
goto out;
if (len > sizeof(struct icmp_filter))
len = sizeof(struct icmp_filter);
ret = -EFAULT;
if (put_user(len, optlen) ||
copy_to_user(optval, &raw_sk(sk)->filter, len))
goto out;
ret = 0;
out: return ret;
}
static int do_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_seticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
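/*
 * For illustration, a hedged userspace sketch of the two ioctls above.
 * Note SIOCINQ reports the length of the next pending datagram (the
 * skb_peek() above), not the total queued bytes. The function name is
 * an assumption for the sketch.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static int example_queue_depths(int fd)
{
        int inq = 0, outq = 0;

        if (ioctl(fd, SIOCINQ, &inq) < 0 || ioctl(fd, SIOCOUTQ, &outq) < 0)
                return -1;
        printf("next datagram: %d bytes, unsent: %d bytes\n", inq, outq);
        return 0;
}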
#ifdef CONFIG_COMPAT
static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
.getsockopt = raw_getsockopt,
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.bind = raw_bind,
.backlog_rcv = raw_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw_sock),
.h.raw_hash = &raw_v4_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
.compat_ioctl = compat_raw_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static struct sock *raw_get_first(struct seq_file *seq)
{
struct sock *sk;
struct raw_iter_state *state = raw_seq_private(seq);
for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
struct hlist_node *node;
sk_for_each(sk, node, &state->h->ht[state->bucket])
if (sock_net(sk) == seq_file_net(seq))
goto found;
}
sk = NULL;
found:
return sk;
}
static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
struct raw_iter_state *state = raw_seq_private(seq);
do {
sk = sk_next(sk);
try_again:
;
} while (sk && sock_net(sk) != seq_file_net(seq));
if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
sk = sk_head(&state->h->ht[state->bucket]);
goto try_again;
}
return sk;
}
static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = raw_get_first(seq);
if (sk)
while (pos && (sk = raw_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_lock(&state->h->lock);
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(raw_seq_start);
void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = raw_get_first(seq);
else
sk = raw_get_next(seq, v);
++*pos;
return sk;
}
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_unlock(&state->h->lock);
}
EXPORT_SYMBOL_GPL(raw_seq_stop);
static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr,
src = inet->inet_rcv_saddr;
__u16 destp = 0,
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
static int raw_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops\n");
else
raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
return 0;
}
static const struct seq_operations raw_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw_seq_show,
};
int raw_seq_open(struct inode *ino, struct file *file,
struct raw_hashinfo *h, const struct seq_operations *ops)
{
int err;
struct raw_iter_state *i;
err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state));
if (err < 0)
return err;
i = raw_seq_private((struct seq_file *)file->private_data);
i->h = h;
return 0;
}
EXPORT_SYMBOL_GPL(raw_seq_open);
static int raw_v4_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops);
}
static const struct file_operations raw_seq_fops = {
.owner = THIS_MODULE,
.open = raw_v4_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static __net_init int raw_init_net(struct net *net)
{
if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops))
return -ENOMEM;
return 0;
}
static __net_exit void raw_exit_net(struct net *net)
{
proc_net_remove(net, "raw");
}
static __net_initdata struct pernet_operations raw_net_ops = {
.init = raw_init_net,
.exit = raw_exit_net,
};
int __init raw_proc_init(void)
{
return register_pernet_subsys(&raw_net_ops);
}
void __init raw_proc_exit(void)
{
unregister_pernet_subsys(&raw_net_ops);
}
#endif /* CONFIG_PROC_FS */
| /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* RAW - implementation of IP "raw" sockets.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Fixes:
* Alan Cox : verify_area() fixed up
* Alan Cox : ICMP error handling
* Alan Cox : EMSGSIZE if you send too big a packet
* Alan Cox : Now uses generic datagrams and shared
* skbuff library. No more peek crashes,
* no more backlogs
* Alan Cox : Checks sk->broadcast.
* Alan Cox : Uses skb_free_datagram/skb_copy_datagram
* Alan Cox : Raw passes ip options too
* Alan Cox : Setsocketopt added
* Alan Cox : Fixed error return for broadcasts
* Alan Cox : Removed wake_up calls
* Alan Cox : Use ttl/tos
* Alan Cox : Cleaned up old debugging
* Alan Cox : Use new kernel side addresses
* Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets.
* Alan Cox : BSD style RAW socket demultiplexing.
* Alan Cox : Beginnings of mrouted support.
* Alan Cox : Added IP_HDRINCL option.
* Alan Cox : Skip broadcast check if BSDism set.
* David S. Miller : New socket lookup architecture.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/aio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/mroute.h>
#include <linux/netdevice.h>
#include <linux/in_route.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/snmp.h>
#include <net/tcp_states.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
static struct raw_hashinfo raw_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
};
void raw_hash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
struct hlist_head *head;
head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
write_lock_bh(&h->lock);
sk_add_node(sk, head);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_hash_sk);
void raw_unhash_sk(struct sock *sk)
{
struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
write_lock_bh(&h->lock);
if (sk_del_node_init(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_unhash_sk);
static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
unsigned short num, __be32 raddr, __be32 laddr, int dif)
{
struct hlist_node *node;
sk_for_each_from(sk, node) {
struct inet_sock *inet = inet_sk(sk);
if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
!(inet->inet_daddr && inet->inet_daddr != raddr) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found; /* gotcha */
}
sk = NULL;
found:
return sk;
}
/*
* 0 - deliver
* 1 - block
*/
static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
{
int type;
if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
return 1;
type = icmp_hdr(skb)->type;
if (type < 32) {
__u32 data = raw_sk(sk)->filter.data;
return ((1 << type) & data) != 0;
}
/* Do not block unknown ICMP types */
return 0;
}
/* IP input processing comes here for RAW socket delivery.
* Caller owns SKB, so we must make clones.
*
* RFC 1122: SHOULD pass TOS value up to the transport layer.
* -> It does. And not only TOS, but the entire IP header.
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
struct net *net;
read_lock(&raw_v4_hashinfo.lock);
head = &raw_v4_hashinfo.ht[hash];
if (hlist_empty(head))
goto out;
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
while (sk) {
delivered = 1;
if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
if (clone)
raw_rcv(sk, clone);
}
sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
iph->saddr, iph->daddr,
skb->dev->ifindex);
}
out:
read_unlock(&raw_v4_hashinfo.lock);
return delivered;
}
int raw_local_deliver(struct sk_buff *skb, int protocol)
{
int hash;
struct sock *raw_sk;
hash = protocol & (RAW_HTABLE_SIZE - 1);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
/* If there may be a raw socket we must check; if not, we
* don't care.
*/
if (raw_sk && !raw_v4_input(skb, ip_hdr(skb), hash))
raw_sk = NULL;
return raw_sk != NULL;
}
static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
struct inet_sock *inet = inet_sk(sk);
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
int err = 0;
int harderr = 0;
/* Report error on raw socket, if:
1. User requested ip_recverr.
2. Socket is connected (otherwise the error indication
is useless without ip_recverr and the error is hard).
*/
if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
return;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
err = EHOSTUNREACH;
if (code > NR_ICMP_UNREACH)
break;
err = icmp_err_convert[code].errno;
harderr = icmp_err_convert[code].fatal;
if (code == ICMP_FRAG_NEEDED) {
harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
err = EMSGSIZE;
}
}
if (inet->recverr) {
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 *payload = skb->data + (iph->ihl << 2);
if (inet->hdrincl)
payload = skb->data;
ip_icmp_error(sk, skb, err, 0, info, payload);
}
if (inet->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
int hash;
struct sock *raw_sk;
const struct iphdr *iph;
struct net *net;
hash = protocol & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v4_hashinfo.lock);
raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
if (raw_sk != NULL) {
iph = (const struct iphdr *)skb->data;
net = dev_net(skb->dev);
while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
iph->daddr, iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info);
raw_sk = sk_next(raw_sk);
iph = (const struct iphdr *)skb->data;
}
}
read_unlock(&raw_v4_hashinfo.lock);
}
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
/* Charge it to the socket. */
if (ip_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return NET_RX_SUCCESS;
}
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
nf_reset(skb);
skb_push(skb, skb->data - skb_network_header(skb));
raw_rcv_skb(sk, skb);
return 0;
}
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
struct rtable **rtp,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct iphdr *iph;
struct sk_buff *skb;
unsigned int iphlen;
int err;
struct rtable *rt = *rtp;
if (length > rt->dst.dev->mtu) {
ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
skb_put(skb, length);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = -EFAULT;
if (memcpy_fromiovecend((void *)iph, from, 0, length))
goto error_free;
iphlen = iph->ihl * 4;
/*
* We don't want to modify the ip header, but we do need to
* be sure that it won't cause problems later along the network
* stack. Specifically, we want to make sure that iph->ihl is a
* sane value. If ihl points beyond the length of the buffer passed
* in, reject the frame as invalid.
*/
err = -EINVAL;
if (iphlen > length)
goto error_free;
if (iphlen >= sizeof(*iph)) {
if (!iph->saddr)
iph->saddr = rt->rt_src;
iph->check = 0;
iph->tot_len = htons(length);
if (!iph->id)
ip_select_ident(iph, &rt->dst, NULL);
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_free:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
return err;
}
static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
u8 __user *code = NULL;
int probed = 0;
unsigned int i;
if (!msg->msg_iov)
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
if (!iov)
continue;
switch (fl4->flowi4_proto) {
case IPPROTO_ICMP:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
break;
if (!type) {
type = iov->iov_base;
/* check if code field is readable or not. */
if (iov->iov_len > 1)
code = type + 1;
} else if (!code)
code = iov->iov_base;
if (type && code) {
if (get_user(fl4->fl4_icmp_type, type) ||
get_user(fl4->fl4_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
default:
probed = 1;
break;
}
if (probed)
break;
}
return 0;
}
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
struct ip_options_data opt_copy;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
static int complained;
if (!complained++)
printk(KERN_INFO "%s forgot to set AF_INET in "
"raw sendmsg. Fix it!\n",
current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
goto out;
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
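/*
 * Unlike the pre-RCU version of this function, the socket's own options
 * are not used by reference here: inet->inet_opt may be replaced (and
 * freed after a grace period) by a concurrent setsockopt(IP_OPTIONS),
 * so a private copy is taken onto the stack inside the read-side
 * critical section below.
 */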
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so combining IP options with IP_HDRINCL is nonsense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->opt.srr) {
if (!daddr)
goto done;
daddr = ipc.opt->opt.faddr;
}
}
tos = RT_CONN_FLAGS(sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, msg->msg_iov, len,
&rt, msg->msg_flags);
else {
if (!ipc.addr)
ipc.addr = rt->rt_dst;
lock_sock(sk);
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
static void raw_close(struct sock *sk, long timeout)
{
/*
* Raw sockets may have direct kernel references. Kill them.
*/
ip_ra_control(sk, 0, NULL);
sk_common_release(sk);
}
static void raw_destroy(struct sock *sk)
{
lock_sock(sk);
ip_flush_pending_frames(sk);
release_sock(sk);
}
/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
ret = 0;
out: return ret;
}
/*
* This should be easy: if there is something there
* we return it, otherwise we block.
*/
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
if (flags & MSG_ERRQUEUE) {
err = ip_recv_error(sk, msg, len);
goto out;
}
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err)
return err;
return copied;
}
static int raw_init(struct sock *sk)
{
struct raw_sock *rp = raw_sk(sk);
if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
memset(&rp->filter, 0, sizeof(rp->filter));
return 0;
}
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
{
if (optlen > sizeof(struct icmp_filter))
optlen = sizeof(struct icmp_filter);
if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
}
static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
{
int len, ret = -EFAULT;
if (get_user(len, optlen))
goto out;
ret = -EINVAL;
if (len < 0)
goto out;
if (len > sizeof(struct icmp_filter))
len = sizeof(struct icmp_filter);
ret = -EFAULT;
if (put_user(len, optlen) ||
copy_to_user(optval, &raw_sk(sk)->filter, len))
goto out;
ret = 0;
out: return ret;
}
static int do_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_seticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_RAW)
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
return do_raw_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (optname == ICMP_FILTER) {
if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
return -EOPNOTSUPP;
else
return raw_geticmpfilter(sk, optval, optlen);
}
return -ENOPROTOOPT;
}
static int raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_RAW)
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
return do_raw_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IP_MROUTE
return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
.getsockopt = raw_getsockopt,
.sendmsg = raw_sendmsg,
.recvmsg = raw_recvmsg,
.bind = raw_bind,
.backlog_rcv = raw_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw_sock),
.h.raw_hash = &raw_v4_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
.compat_ioctl = compat_raw_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static struct sock *raw_get_first(struct seq_file *seq)
{
struct sock *sk;
struct raw_iter_state *state = raw_seq_private(seq);
for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
++state->bucket) {
struct hlist_node *node;
sk_for_each(sk, node, &state->h->ht[state->bucket])
if (sock_net(sk) == seq_file_net(seq))
goto found;
}
sk = NULL;
found:
return sk;
}
static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
struct raw_iter_state *state = raw_seq_private(seq);
do {
sk = sk_next(sk);
try_again:
;
} while (sk && sock_net(sk) != seq_file_net(seq));
if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
sk = sk_head(&state->h->ht[state->bucket]);
goto try_again;
}
return sk;
}
static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = raw_get_first(seq);
if (sk)
while (pos && (sk = raw_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
void *raw_seq_start(struct seq_file *seq, loff_t *pos)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_lock(&state->h->lock);
return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(raw_seq_start);
void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = raw_get_first(seq);
else
sk = raw_get_next(seq, v);
++*pos;
return sk;
}
EXPORT_SYMBOL_GPL(raw_seq_next);
void raw_seq_stop(struct seq_file *seq, void *v)
{
struct raw_iter_state *state = raw_seq_private(seq);
read_unlock(&state->h->lock);
}
EXPORT_SYMBOL_GPL(raw_seq_stop);
static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr,
src = inet->inet_rcv_saddr;
__u16 destp = 0,
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
static int raw_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops\n");
else
raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
return 0;
}
static const struct seq_operations raw_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw_seq_show,
};
int raw_seq_open(struct inode *ino, struct file *file,
struct raw_hashinfo *h, const struct seq_operations *ops)
{
int err;
struct raw_iter_state *i;
err = seq_open_net(ino, file, ops, sizeof(struct raw_iter_state));
if (err < 0)
return err;
i = raw_seq_private((struct seq_file *)file->private_data);
i->h = h;
return 0;
}
EXPORT_SYMBOL_GPL(raw_seq_open);
static int raw_v4_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v4_hashinfo, &raw_seq_ops);
}
static const struct file_operations raw_seq_fops = {
.owner = THIS_MODULE,
.open = raw_v4_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static __net_init int raw_init_net(struct net *net)
{
if (!proc_net_fops_create(net, "raw", S_IRUGO, &raw_seq_fops))
return -ENOMEM;
return 0;
}
static __net_exit void raw_exit_net(struct net *net)
{
proc_net_remove(net, "raw");
}
static __net_initdata struct pernet_operations raw_net_ops = {
.init = raw_init_net,
.exit = raw_exit_net,
};
int __init raw_proc_init(void)
{
return register_pernet_subsys(&raw_net_ops);
}
void __init raw_proc_exit(void)
{
unregister_pernet_subsys(&raw_net_ops);
}
#endif /* CONFIG_PROC_FS */
|
141,965,066,543,237 | /*
* Syncookies implementation for the Linux kernel
*
* Copyright (C) 1997 Andi Kleen
* Based on ideas by D.J.Bernstein and Eric Schenk.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/cryptohash.h>
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/route.h>
/* Timestamps: lowest bits store TCP options */
#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)
extern int sysctl_tcp_syncookies;
__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
EXPORT_SYMBOL(syncookie_secret);
static __init int init_syncookies(void)
{
get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
return 0;
}
__initcall(init_syncookies);
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
ipv4_cookie_scratch);
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
{
__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
tmp[1] = (__force u32)daddr;
tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
tmp[3] = count;
sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
return tmp[17];
}
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
* sent in the syn-ack.
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
__u32 cookie_init_timestamp(struct request_sock *req)
{
struct inet_request_sock *ireq;
u32 ts, ts_now = tcp_time_stamp;
u32 options = 0;
ireq = inet_rsk(req);
options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
options |= ireq->sack_ok << 4;
options |= ireq->ecn_ok << 5;
ts = ts_now & ~TSMASK;
ts |= options;
if (ts > ts_now) {
ts >>= TSBITS;
ts--;
ts <<= TSBITS;
ts |= options;
}
return ts;
}
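/*
 * Illustrative sketch, not part of the original source: the same packing
 * cookie_init_timestamp() performs above, condensed to one subtraction.
 * It assumes this file's TSBITS/TSMASK definitions; the helper name is
 * hypothetical and the block is fenced off so it is never compiled.
 */
#if 0
static u32 pack_ts_options(u32 ts_now, u32 wscale, u32 sack_ok, u32 ecn_ok)
{
	u32 options = (wscale & 0xf) | (sack_ok << 4) | (ecn_ok << 5);
	u32 ts = (ts_now & ~TSMASK) | options;

	/* Keep the encoded stamp <= ts_now so timestamps never go backwards. */
	if (ts > ts_now)
		ts -= TSMASK + 1;
	return ts;
}
#endif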
static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
__be16 dport, __u32 sseq, __u32 count,
__u32 data)
{
/*
* Compute the secure sequence number.
* The output should be:
* HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
* + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
* Where sseq is their sequence number and count increases every
* minute by 1.
* As an extra hack, we add a small "data" value that encodes the
* MSS into the second hash value.
*/
return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
sseq + (count << COOKIEBITS) +
((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
& COOKIEMASK));
}
/*
* This retrieves the small "data" value from the syncookie.
* If the syncookie is bad, the data returned will be out of
* range. This must be checked by the caller.
*
* The count value used to generate the cookie must be within
* "maxdiff" if the current (passed-in) "count". The return value
* is (__u32)-1 if this test fails.
*/
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
__be16 sport, __be16 dport, __u32 sseq,
__u32 count, __u32 maxdiff)
{
__u32 diff;
/* Strip away the layers from the cookie */
cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
if (diff >= maxdiff)
return (__u32)-1;
return (cookie -
cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
& COOKIEMASK; /* Leaving the data behind */
}
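/*
 * Worked roundtrip (added for illustration): write h1 for
 * cookie_hash(.., 0, 0) and h2 for cookie_hash(.., count, 1). The encoder
 * above produced, with all arithmetic mod 2^32,
 *
 *	cookie = h1 + sseq + (count << COOKIEBITS) + ((h2 + data) & COOKIEMASK)
 *
 * so stripping h1 + sseq leaves (count << COOKIEBITS) + ((h2 + data) &
 * COOKIEMASK), whose top 8 bits are the low 8 bits of count. "diff" is
 * therefore how many counter ticks ago the cookie was minted, and
 * subtracting h2 recomputed with that older count recovers "data" exactly.
 */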
/*
* MSS Values are taken from the 2009 paper
* 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
* - values 1440 to 1460 accounted for 80% of observed mss values
* - values outside the 536-1460 range are rare (<0.2%).
*
* Table must be sorted.
*/
static __u16 const msstab[] = {
64,
512,
536,
1024,
1440,
1460,
4312,
8960,
};
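/*
 * Example (added for illustration): an advertised mss of 1452 walks the
 * table from the top in cookie_v4_init_sequence() below and stops at
 * msstab[4] == 1440, the largest entry <= 1452; anything below 512 falls
 * through to msstab[0] == 64. Only the index is folded into the cookie,
 * so these eight entries cost just 3 bits of "data".
 */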
/*
* Generate a syncookie. mssp points to the mss, which is returned
* rounded down to the value encoded in the cookie.
*/
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
{
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
tcp_synq_overflow(sk);
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
th->source, th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
/*
* This (misnamed) value is the maximum age of a syncookie that is accepted.
* Its ideal value should be dependent on TCP_TIMEOUT_INIT and
* sysctl_tcp_retries1. It's a rather complicated formula (exponential
* backoff) to compute at runtime so it's currently hardcoded here.
*/
#define COUNTER_TRIES 4
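/*
 * Example (added for illustration): the counter passed to
 * check_tcp_syn_cookie() is jiffies / (HZ * 60), which ticks once per
 * minute, so COUNTER_TRIES == 4 accepts cookies minted within the last
 * four counter ticks, i.e. roughly the last four minutes.
 */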
/*
* Check if an ack sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
{
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
th->source, th->dest, seq,
jiffies / (HZ * 60),
COUNTER_TRIES);
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
if (child)
inet_csk_reqsk_queue_add(sk, req, child);
else
reqsk_free(req);
return child;
}
/*
* when syncookies are in effect and tcp timestamps are enabled we store
* additional tcp options in the timestamp.
* This extracts these options from the timestamp echo.
*
* The lowest 4 bits store snd_wscale.
* next 2 bits indicate SACK and ECN support.
*
* Return false if we decode an option that should not be present.
*/
bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
{
/* echoed timestamp, lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr & TSMASK;
if (!tcp_opt->saw_tstamp) {
tcp_clear_options(tcp_opt);
return true;
}
if (!sysctl_tcp_timestamps)
return false;
tcp_opt->sack_ok = (options >> 4) & 0x1;
*ecn_ok = (options >> 5) & 1;
if (*ecn_ok && !sysctl_tcp_ecn)
return false;
if (tcp_opt->sack_ok && !sysctl_tcp_sack)
return false;
if ((options & 0xf) == 0xf)
return true; /* no window scaling */
tcp_opt->wscale_ok = 1;
tcp_opt->snd_wscale = options & 0xf;
return sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_check_timestamp);
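/*
 * Illustrative decode counterpart, not part of the original source: how
 * the six option bits validated above unpack from the echoed timestamp.
 * Hypothetical helper using this file's TSMASK; fenced off from the build.
 */
#if 0
static void unpack_ts_options(u32 tsecr, u32 *wscale, u32 *sack_ok, u32 *ecn_ok)
{
	u32 options = tsecr & TSMASK;	/* low TSBITS bits carry the options */

	*wscale  = options & 0xf;	/* 0xf means "no window scaling" */
	*sack_ok = (options >> 4) & 1;
	*ecn_ok  = (options >> 5) & 1;
}
#endif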
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
struct tcp_options_received tcp_opt;
u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
const struct tcphdr *th = tcp_hdr(skb);
__u32 cookie = ntohl(th->ack_seq) - 1;
struct sock *ret = sk;
struct request_sock *req;
int mss;
struct rtable *rt;
__u8 rcv_wscale;
bool ecn_ok;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
(mss = cookie_check(skb, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
goto out;
ireq = inet_rsk(req);
treq = tcp_rsk(req);
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
req->mss = mss;
ireq->loc_port = th->dest;
ireq->rmt_port = th->source;
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
if (opt && opt->optlen) {
int opt_size = sizeof(struct ip_options) + opt->optlen;
ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) {
kfree(ireq->opt);
ireq->opt = NULL;
}
}
if (security_inet_conn_request(sk, skb, req)) {
reqsk_free(req);
goto out;
}
req->expires = 0UL;
req->retrans = 0;
/*
* We need to look up the route here to get at the correct
* window size. We had better make sure that the window size
* hasn't changed since we received the original syn, but I see
* no easy way to do this.
*/
{
struct flowi4 fl4;
flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
ireq->loc_addr, th->source, th->dest);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
reqsk_free(req);
goto out;
}
}
/* Try to redo what tcp_v4_send_synack did. */
req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
ret = get_cookie_sock(sk, skb, req, &rt->dst);
out: return ret;
}
| /*
* Syncookies implementation for the Linux kernel
*
* Copyright (C) 1997 Andi Kleen
* Based on ideas by D.J.Bernstein and Eric Schenk.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/cryptohash.h>
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/route.h>
/* Timestamps: lowest bits store TCP options */
#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)
extern int sysctl_tcp_syncookies;
__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
EXPORT_SYMBOL(syncookie_secret);
static __init int init_syncookies(void)
{
get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
return 0;
}
__initcall(init_syncookies);
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
ipv4_cookie_scratch);
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
{
__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
tmp[1] = (__force u32)daddr;
tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
tmp[3] = count;
sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
return tmp[17];
}
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
* tcp options in the lower bits of the timestamp value that will be
* sent in the syn-ack.
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
__u32 cookie_init_timestamp(struct request_sock *req)
{
struct inet_request_sock *ireq;
u32 ts, ts_now = tcp_time_stamp;
u32 options = 0;
ireq = inet_rsk(req);
options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
options |= ireq->sack_ok << 4;
options |= ireq->ecn_ok << 5;
ts = ts_now & ~TSMASK;
ts |= options;
if (ts > ts_now) {
ts >>= TSBITS;
ts--;
ts <<= TSBITS;
ts |= options;
}
return ts;
}
static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
__be16 dport, __u32 sseq, __u32 count,
__u32 data)
{
/*
* Compute the secure sequence number.
* The output should be:
* HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
* + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
* Where sseq is their sequence number and count increases every
* minute by 1.
* As an extra hack, we add a small "data" value that encodes the
* MSS into the second hash value.
*/
return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
sseq + (count << COOKIEBITS) +
((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
& COOKIEMASK));
}
/*
* This retrieves the small "data" value from the syncookie.
* If the syncookie is bad, the data returned will be out of
* range. This must be checked by the caller.
*
* The count value used to generate the cookie must be within
* "maxdiff" if the current (passed-in) "count". The return value
* is (__u32)-1 if this test fails.
*/
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
__be16 sport, __be16 dport, __u32 sseq,
__u32 count, __u32 maxdiff)
{
__u32 diff;
/* Strip away the layers from the cookie */
cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
if (diff >= maxdiff)
return (__u32)-1;
return (cookie -
cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
& COOKIEMASK; /* Leaving the data behind */
}
/*
* MSS Values are taken from the 2009 paper
* 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
* - values 1440 to 1460 accounted for 80% of observed mss values
* - values outside the 536-1460 range are rare (<0.2%).
*
* Table must be sorted.
*/
static __u16 const msstab[] = {
64,
512,
536,
1024,
1440,
1460,
4312,
8960,
};
/*
* Generate a syncookie. mssp points to the mss, which is returned
* rounded down to the value encoded in the cookie.
*/
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
{
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
tcp_synq_overflow(sk);
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
th->source, th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
/*
* This (misnamed) value is the maximum age of a syncookie that is accepted.
* Its ideal value should be dependent on TCP_TIMEOUT_INIT and
* sysctl_tcp_retries1. It's a rather complicated formula (exponential
* backoff) to compute at runtime so it's currently hardcoded here.
*/
#define COUNTER_TRIES 4
/*
* Check if an ack sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
{
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
th->source, th->dest, seq,
jiffies / (HZ * 60),
COUNTER_TRIES);
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
if (child)
inet_csk_reqsk_queue_add(sk, req, child);
else
reqsk_free(req);
return child;
}
/*
* when syncookies are in effect and tcp timestamps are enabled we store
* additional tcp options in the timestamp.
* This extracts these options from the timestamp echo.
*
* The lowest 4 bits store snd_wscale.
* next 2 bits indicate SACK and ECN support.
*
* Return false if we decode an option that should not be present.
*/
bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
{
/* echoed timestamp, lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr & TSMASK;
if (!tcp_opt->saw_tstamp) {
tcp_clear_options(tcp_opt);
return true;
}
if (!sysctl_tcp_timestamps)
return false;
tcp_opt->sack_ok = (options >> 4) & 0x1;
*ecn_ok = (options >> 5) & 1;
if (*ecn_ok && !sysctl_tcp_ecn)
return false;
if (tcp_opt->sack_ok && !sysctl_tcp_sack)
return false;
if ((options & 0xf) == 0xf)
return true; /* no window scaling */
tcp_opt->wscale_ok = 1;
tcp_opt->snd_wscale = options & 0xf;
return sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_check_timestamp);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
struct tcp_options_received tcp_opt;
u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
const struct tcphdr *th = tcp_hdr(skb);
__u32 cookie = ntohl(th->ack_seq) - 1;
struct sock *ret = sk;
struct request_sock *req;
int mss;
struct rtable *rt;
__u8 rcv_wscale;
bool ecn_ok;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
(mss = cookie_check(skb, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
goto out;
ireq = inet_rsk(req);
treq = tcp_rsk(req);
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
req->mss = mss;
ireq->loc_port = th->dest;
ireq->rmt_port = th->source;
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = ecn_ok;
ireq->snd_wscale = tcp_opt.snd_wscale;
ireq->sack_ok = tcp_opt.sack_ok;
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
if (opt && opt->optlen) {
int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
kfree(ireq->opt);
ireq->opt = NULL;
}
}
if (security_inet_conn_request(sk, skb, req)) {
reqsk_free(req);
goto out;
}
req->expires = 0UL;
req->retrans = 0;
/*
* We need to look up the route here to get at the correct
* window size. We had better make sure that the window size
* hasn't changed since we received the original syn, but I see
* no easy way to do this.
*/
{
struct flowi4 fl4;
flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
ireq->loc_addr, th->source, th->dest);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
reqsk_free(req);
goto out;
}
}
/* Try to redo what tcp_v4_send_synack did. */
req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
ireq->wscale_ok, &rcv_wscale,
dst_metric(&rt->dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
ret = get_cookie_sock(sk, skb, req, &rt->dst);
out: return ret;
}
|
165,610,100,722,415 | /*
* L2TPv3 IP encapsulation support
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include "l2tp_core.h"
struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
__u32 conn_id;
__u32 peer_conn_id;
__u64 tx_packets;
__u64 tx_bytes;
__u64 tx_errors;
__u64 rx_packets;
__u64 rx_bytes;
__u64 rx_errors;
};
static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;
static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
return (struct l2tp_ip_sock *)sk;
}
static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct hlist_node *node;
struct sock *sk;
sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);
return sk;
}
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
* preceded by 32-bits of zeros.
*
* L2TPv3 Session Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Session ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Cookie (optional, maximum 64 bits)...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* L2TPv3 Control Message Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | (32 bits of zeros) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Control Connection ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Ns | Nr |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All control frames are passed to userspace.
*/
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is an L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Look up the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
read_unlock_bh(&l2tp_ip_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
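/*
 * Illustrative sketch, not part of the original source: the minimal
 * classification l2tp_ip_recv() above applies to the two layouts drawn
 * earlier -- a zero first word marks a control frame. Hypothetical
 * helper, fenced off from the build.
 */
#if 0
static int l2tp_ip_frame_is_control(const struct sk_buff *skb)
{
	if (skb->len < 4)
		return -1;			/* too short to classify */
	return *(__be32 *)skb->data == 0;	/* 0 => control, else data */
}
#endif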
static int l2tp_ip_open(struct sock *sk)
{
/* Prevent autobind. We don't have ports. */
inet_sk(sk)->inet_num = IPPROTO_L2TP;
write_lock_bh(&l2tp_ip_lock);
sk_add_node(sk, &l2tp_ip_table);
write_unlock_bh(&l2tp_ip_lock);
return 0;
}
static void l2tp_ip_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
hlist_del_init(&sk->sk_node);
write_unlock_bh(&l2tp_ip_lock);
sk_common_release(sk);
}
static void l2tp_ip_destroy_sock(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
sk_refcnt_debug_dec(sk);
}
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
write_lock_bh(&l2tp_ip_lock);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
out:
release_sock(sk);
return ret;
out_in_use:
read_unlock_bh(&l2tp_ip_lock);
return ret;
}
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct flowi4 fl4;
struct rtable *rt;
__be32 saddr;
int oif, rc;
rc = -EINVAL;
if (addr_len < sizeof(*lsa))
goto out;
rc = -EAFNOSUPPORT;
if (lsa->l2tp_family != AF_INET)
goto out;
sk_dst_reset(sk);
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
rc = -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
goto out;
rt = ip_route_connect(&fl4, lsa->l2tp_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
IPPROTO_L2TP,
0, 0, sk, true);
if (IS_ERR(rt)) {
rc = PTR_ERR(rt);
if (rc == -ENETUNREACH)
IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
rc = -ENETUNREACH;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
goto out;
}
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
if (!inet->inet_saddr)
inet->inet_saddr = rt->rt_src;
if (!inet->inet_rcv_saddr)
inet->inet_rcv_saddr = rt->rt_src;
inet->inet_daddr = rt->rt_dst;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
rc = 0;
out:
return rc;
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
memset(lsa, 0, sizeof(*lsa));
lsa->l2tp_family = AF_INET;
if (peer) {
if (!inet->inet_dport)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr.s_addr = inet->inet_daddr;
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
lsa->l2tp_addr.s_addr = addr;
}
*uaddr_len = sizeof(*lsa);
return 0;
}
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
/* Charge it to the socket, dropping if the queue is full. */
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0)
goto drop;
return 0;
drop:
IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
int rc;
struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ip_options *opt = inet->opt;
struct rtable *rt = NULL;
int connected = 0;
__be32 daddr;
if (sock_flag(sk, SOCK_DEAD))
return -ENOTCONN;
/* Get and verify the address. */
if (msg->msg_name) {
struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
if (msg->msg_namelen < sizeof(*lip))
return -EINVAL;
if (lip->l2tp_family != AF_INET) {
if (lip->l2tp_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = lip->l2tp_addr.s_addr;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
connected = 1;
}
/* Allocate a socket buffer */
rc = -ENOMEM;
skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
4 + len, 0, GFP_KERNEL);
if (!skb)
goto error;
/* Reserve space for headers, putting IP header on 4-byte boundary. */
skb_reserve(skb, 2 + NET_SKB_PAD);
skb_reset_network_header(skb);
skb_reserve(skb, sizeof(struct iphdr));
skb_reset_transport_header(skb);
/* Insert 0 session_id */
*((__be32 *) skb_put(skb, 4)) = 0;
/* Copy user data into skb */
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
goto error;
}
if (connected)
rt = (struct rtable *) __sk_dst_check(sk, 0);
if (rt == NULL) {
/* Use correct destination address if we have options. */
if (opt && opt->srr)
daddr = opt->faddr;
/* If this fails, the retransmit mechanism of the transport layer will
* keep trying until a route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set(skb, dst_clone(&rt->dst));
/* Queue the packet to IP for output */
rc = ip_queue_xmit(skb);
error:
/* Update stats */
if (rc >= 0) {
lsa->tx_packets++;
lsa->tx_bytes += len;
rc = len;
} else {
lsa->tx_errors++;
}
return rc;
no_route:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err) {
lsk->rx_errors++;
return err;
}
lsk->rx_packets++;
lsk->rx_bytes += copied;
return copied;
}
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = l2tp_ip_sendmsg,
.recvmsg = l2tp_ip_recvmsg,
.backlog_rcv = l2tp_ip_backlog_recv,
.hash = inet_hash,
.unhash = inet_unhash,
.obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static const struct proto_ops l2tp_ip_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw l2tp_ip_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_L2TP,
.prot = &l2tp_ip_prot,
.ops = &l2tp_ip_ops,
.no_check = 0,
};
static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
};
static int __init l2tp_ip_init(void)
{
int err;
printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
if (err)
goto out1;
inet_register_protosw(&l2tp_ip_protosw);
return 0;
out1:
proto_unregister(&l2tp_ip_prot);
out:
return err;
}
static void __exit l2tp_ip_exit(void)
{
inet_unregister_protosw(&l2tp_ip_protosw);
inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip_prot);
}
module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
* enums
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
| /*
* L2TPv3 IP encapsulation support
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include "l2tp_core.h"
struct l2tp_ip_sock {
/* inet_sock has to be the first member of l2tp_ip_sock */
struct inet_sock inet;
__u32 conn_id;
__u32 peer_conn_id;
__u64 tx_packets;
__u64 tx_bytes;
__u64 tx_errors;
__u64 rx_packets;
__u64 rx_bytes;
__u64 rx_errors;
};
static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;
static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
return (struct l2tp_ip_sock *)sk;
}
static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct hlist_node *node;
struct sock *sk;
sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
if (l2tp == NULL)
continue;
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found;
}
sk = NULL;
found:
return sk;
}
static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);
return sk;
}
/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header
* preceded by 32-bits of zeros.
*
* L2TPv3 Session Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Session ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Cookie (optional, maximum 64 bits)...
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* L2TPv3 Control Message Header Over IP
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | (32 bits of zeros) |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Control Connection ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Ns | Nr |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* All control frames are passed to userspace.
*/
static int l2tp_ip_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
int offset;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is an L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Look up the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
read_unlock_bh(&l2tp_ip_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
static int l2tp_ip_open(struct sock *sk)
{
/* Prevent autobind. We don't have ports. */
inet_sk(sk)->inet_num = IPPROTO_L2TP;
write_lock_bh(&l2tp_ip_lock);
sk_add_node(sk, &l2tp_ip_table);
write_unlock_bh(&l2tp_ip_lock);
return 0;
}
static void l2tp_ip_close(struct sock *sk, long timeout)
{
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
hlist_del_init(&sk->sk_node);
write_unlock_bh(&l2tp_ip_lock);
sk_common_release(sk);
}
static void l2tp_ip_destroy_sock(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
sk_refcnt_debug_dec(sk);
}
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
int ret = -EINVAL;
int chk_addr_ret;
ret = -EADDRINUSE;
read_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
goto out_in_use;
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
ret = -EADDRNOTAVAIL;
if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
sk_dst_reset(sk);
l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
write_lock_bh(&l2tp_ip_lock);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
ret = 0;
out:
release_sock(sk);
return ret;
out_in_use:
read_unlock_bh(&l2tp_ip_lock);
return ret;
}
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct flowi4 fl4;
struct rtable *rt;
__be32 saddr;
int oif, rc;
rc = -EINVAL;
if (addr_len < sizeof(*lsa))
goto out;
rc = -EAFNOSUPPORT;
if (lsa->l2tp_family != AF_INET)
goto out;
sk_dst_reset(sk);
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
rc = -EINVAL;
if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
goto out;
rt = ip_route_connect(&fl4, lsa->l2tp_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
IPPROTO_L2TP,
0, 0, sk, true);
if (IS_ERR(rt)) {
rc = PTR_ERR(rt);
if (rc == -ENETUNREACH)
IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
rc = -ENETUNREACH;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
goto out;
}
l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
if (!inet->inet_saddr)
inet->inet_saddr = rt->rt_src;
if (!inet->inet_rcv_saddr)
inet->inet_rcv_saddr = rt->rt_src;
inet->inet_daddr = rt->rt_dst;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
write_lock_bh(&l2tp_ip_lock);
hlist_del_init(&sk->sk_bind_node);
sk_add_bind_node(sk, &l2tp_ip_bind_table);
write_unlock_bh(&l2tp_ip_lock);
rc = 0;
out:
return rc;
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
memset(lsa, 0, sizeof(*lsa));
lsa->l2tp_family = AF_INET;
if (peer) {
if (!inet->inet_dport)
return -ENOTCONN;
lsa->l2tp_conn_id = lsk->peer_conn_id;
lsa->l2tp_addr.s_addr = inet->inet_daddr;
} else {
__be32 addr = inet->inet_rcv_saddr;
if (!addr)
addr = inet->inet_saddr;
lsa->l2tp_conn_id = lsk->conn_id;
lsa->l2tp_addr.s_addr = addr;
}
*uaddr_len = sizeof(*lsa);
return 0;
}
static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
/* Charge it to the socket, dropping if the queue is full. */
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0)
goto drop;
return 0;
drop:
IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
kfree_skb(skb);
return -1;
}
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
* control frames.
*/
static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
int rc;
struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = NULL;
int connected = 0;
__be32 daddr;
if (sock_flag(sk, SOCK_DEAD))
return -ENOTCONN;
/* Get and verify the address. */
if (msg->msg_name) {
struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
if (msg->msg_namelen < sizeof(*lip))
return -EINVAL;
if (lip->l2tp_family != AF_INET) {
if (lip->l2tp_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = lip->l2tp_addr.s_addr;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
connected = 1;
}
/* Allocate a socket buffer */
rc = -ENOMEM;
skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
4 + len, 0, GFP_KERNEL);
if (!skb)
goto error;
/* Reserve space for headers, putting IP header on 4-byte boundary. */
skb_reserve(skb, 2 + NET_SKB_PAD);
skb_reset_network_header(skb);
skb_reserve(skb, sizeof(struct iphdr));
skb_reset_transport_header(skb);
/* Insert 0 session_id */
*((__be32 *) skb_put(skb, 4)) = 0;
/* Copy user data into skb */
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
goto error;
}
if (connected)
rt = (struct rtable *) __sk_dst_check(sk, 0);
if (rt == NULL) {
struct ip_options_rcu *inet_opt;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
/* Use correct destination address if we have options. */
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
/* If this fails, the retransmit mechanism of the transport layer will
* keep trying until a route appears or the connection times
* itself out.
*/
rt = ip_route_output_ports(sock_net(sk), sk,
daddr, inet->inet_saddr,
inet->inet_dport, inet->inet_sport,
sk->sk_protocol, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if);
if (IS_ERR(rt))
goto no_route;
sk_setup_caps(sk, &rt->dst);
}
skb_dst_set(skb, dst_clone(&rt->dst));
/* Queue the packet to IP for output */
rc = ip_queue_xmit(skb);
error:
/* Update stats */
if (rc >= 0) {
lsa->tx_packets++;
lsa->tx_bytes += len;
rc = len;
} else {
lsa->tx_errors++;
}
return rc;
no_route:
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EHOSTUNREACH;
}
static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int noblock, int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
size_t copied = 0;
int err = -EOPNOTSUPP;
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
if (flags & MSG_OOB)
goto out;
if (addr_len)
*addr_len = sizeof(*sin);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (err)
goto done;
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
done:
skb_free_datagram(sk, skb);
out:
if (err) {
lsk->rx_errors++;
return err;
}
lsk->rx_packets++;
lsk->rx_bytes += copied;
return copied;
}
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
.close = l2tp_ip_close,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = l2tp_ip_sendmsg,
.recvmsg = l2tp_ip_recvmsg,
.backlog_rcv = l2tp_ip_backlog_recv,
.hash = inet_hash,
.unhash = inet_unhash,
.obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static const struct proto_ops l2tp_ip_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip_getname,
.poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw l2tp_ip_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_L2TP,
.prot = &l2tp_ip_prot,
.ops = &l2tp_ip_ops,
.no_check = 0,
};
static struct net_protocol l2tp_ip_protocol __read_mostly = {
.handler = l2tp_ip_recv,
};
static int __init l2tp_ip_init(void)
{
int err;
printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
err = proto_register(&l2tp_ip_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
if (err)
goto out1;
inet_register_protosw(&l2tp_ip_protosw);
return 0;
out1:
proto_unregister(&l2tp_ip_prot);
out:
return err;
}
static void __exit l2tp_ip_exit(void)
{
inet_unregister_protosw(&l2tp_ip_protosw);
inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
proto_unregister(&l2tp_ip_prot);
}
module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");
/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't
* like enums
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
|
231,493,428,064,096 | /*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* MMU support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <yaniv@qumranet.com>
* Avi Kivity <avi@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
/*
* We need the mmu code to access both 32-bit and 64-bit guest ptes,
* so the code in this file is compiled twice, once per pte size.
*/
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define CMPXCHG cmpxchg
#else
#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
* The guest_walker structure emulates the behavior of the hardware page
* table walker.
*/
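/*
* On the way down, one entry per level is recorded: the gfn of each
* guest page table, the pte read from it, and that pte's gpa (used
* later to detect concurrent guest modification). pt_access/pte_access
* accumulate the ACC_* permission bits, gfn holds the final
* translation, and fault describes why a failed walk faulted.
*/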
struct guest_walker {
int level;
gfn_t table_gfn[PT_MAX_FULL_LEVELS];
pt_element_t ptes[PT_MAX_FULL_LEVELS];
pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
unsigned pt_access;
unsigned pte_access;
gfn_t gfn;
struct x86_exception fault;
};
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
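/* Atomically set accessed/dirty bits in a guest pte. cmpxchg (rather
* than a plain store) lets a concurrent guest write to the same pte be
* detected: a non-zero return tells the caller to restart the walk
* instead of clobbering the guest's update; a negative return means
* the table gfn could not be translated.
*/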
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t table_gfn, unsigned index,
pt_element_t orig_pte, pt_element_t new_pte)
{
pt_element_t ret;
pt_element_t *table;
struct page *page;
gpa_t gpa;
gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (gpa == UNMAPPED_GVA)
return -EFAULT;
page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
table = kmap_atomic(page, KM_USER0);
ret = CMPXCHG(&table[index], orig_pte, new_pte);
kunmap_atomic(table, KM_USER0);
kvm_release_page_dirty(page);
return (ret != orig_pte);
}
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
unsigned access;
access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
if (vcpu->arch.mmu.nx)
access &= ~(gpte >> PT64_NX_SHIFT);
#endif
return access;
}
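/* Worked example (illustrative only) of FNAME(gpte_access): for a
* 64-bit gpte 0x8000000000000067ULL (P, W, U, A, D set, NX bit 63 set),
* the first step yields PT_WRITABLE_MASK | PT_USER_MASK | ACC_EXEC_MASK;
* gpte >> PT64_NX_SHIFT is then 1, so the final mask clears
* ACC_EXEC_MASK again: the page is writable and user-accessible but
* not executable.
*/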
/*
* Fetch a guest pte for a guest virtual address
*/
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t addr, u32 access)
{
pt_element_t pte;
pt_element_t __user *ptep_user;
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
bool eperm, present, rsvd_fault;
int offset, write_fault, user_fault, fetch_fault;
write_fault = access & PFERR_WRITE_MASK;
user_fault = access & PFERR_USER_MASK;
fetch_fault = access & PFERR_FETCH_MASK;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
walk:
present = true;
eperm = rsvd_fault = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
#if PTTYPE == 64
if (walker->level == PT32E_ROOT_LEVEL) {
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
if (!is_present_gpte(pte)) {
present = false;
goto error;
}
--walker->level;
}
#endif
ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
(mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
pt_access = ACC_ALL;
for (;;) {
gfn_t real_gfn;
unsigned long host_addr;
index = PT_INDEX(addr, walker->level);
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (unlikely(real_gfn == UNMAPPED_GVA)) {
present = false;
break;
}
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
if (unlikely(kvm_is_error_hva(host_addr))) {
present = false;
break;
}
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
if (unlikely(copy_from_user(&pte, ptep_user, sizeof(pte)))) {
present = false;
break;
}
trace_kvm_mmu_paging_element(pte, walker->level);
if (unlikely(!is_present_gpte(pte))) {
present = false;
break;
}
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
rsvd_fault = true;
break;
}
if (unlikely(write_fault && !is_writable_pte(pte)
&& (user_fault || is_write_protection(vcpu))))
eperm = true;
if (unlikely(user_fault && !(pte & PT_USER_MASK)))
eperm = true;
#if PTTYPE == 64
if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
eperm = true;
#endif
if (!eperm && !rsvd_fault
&& unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
index, pte, pte|PT_ACCESSED_MASK);
if (ret < 0) {
present = false;
break;
} else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
}
pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
walker->ptes[walker->level - 1] = pte;
if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
((walker->level == PT_DIRECTORY_LEVEL) &&
is_large_pte(pte) &&
(PTTYPE == 64 || is_pse(vcpu))) ||
((walker->level == PT_PDPE_LEVEL) &&
is_large_pte(pte) &&
mmu->root_level == PT64_ROOT_LEVEL)) {
int lvl = walker->level;
gpa_t real_gpa;
gfn_t gfn;
u32 ac;
gfn = gpte_to_gfn_lvl(pte, lvl);
gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
if (PTTYPE == 32 &&
walker->level == PT_DIRECTORY_LEVEL &&
is_cpuid_PSE36())
gfn += pse36_gfn_delta(pte);
ac = write_fault | fetch_fault | user_fault;
real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
ac);
if (real_gpa == UNMAPPED_GVA)
return 0;
walker->gfn = real_gpa >> PAGE_SHIFT;
break;
}
pt_access = pte_access;
--walker->level;
}
if (unlikely(!present || eperm || rsvd_fault))
goto error;
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
int ret;
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
pte|PT_DIRTY_MASK);
if (ret < 0) {
present = false;
goto error;
} else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
walker->ptes[walker->level - 1] = pte;
}
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
__func__, (u64)pte, pte_access, pt_access);
return 1;
error:
walker->fault.vector = PF_VECTOR;
walker->fault.error_code_valid = true;
walker->fault.error_code = 0;
if (present)
walker->fault.error_code |= PFERR_PRESENT_MASK;
walker->fault.error_code |= write_fault | user_fault;
if (fetch_fault && mmu->nx)
walker->fault.error_code |= PFERR_FETCH_MASK;
if (rsvd_fault)
walker->fault.error_code |= PFERR_RSVD_MASK;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
trace_kvm_mmu_walker_error(walker->fault.error_code);
return 0;
}
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
access);
}
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr,
u32 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
addr, access);
}
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
pt_element_t gpte)
{
u64 nonpresent = shadow_trap_nonpresent_pte;
if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
if (!is_present_gpte(gpte)) {
if (!sp->unsync)
nonpresent = shadow_notrap_nonpresent_pte;
goto no_present;
}
if (!(gpte & PT_ACCESSED_MASK))
goto no_present;
return false;
no_present:
drop_spte(vcpu->kvm, spte, nonpresent);
return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
pfn_t pfn;
gpte = *(const pt_element_t *)pte;
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
return;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return;
}
/*
* We call mmu_set_spte() with host_writable = true because
* vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
gpte_to_gfn(gpte), pfn, true, true);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
struct guest_walker *gw, int level)
{
pt_element_t curr_pte;
gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
u64 mask;
int r, index;
if (level == PT_PAGE_TABLE_LEVEL) {
mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
base_gpa = pte_gpa & ~mask;
index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
curr_pte = gw->prefetch_ptes[index];
} else
r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
&curr_pte, sizeof(curr_pte));
return r || curr_pte != gw->ptes[level - 1];
}
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
u64 *sptep)
{
struct kvm_mmu_page *sp;
pt_element_t *gptep = gw->prefetch_ptes;
u64 *spte;
int i;
sp = page_header(__pa(sptep));
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return;
if (sp->role.direct)
return __direct_pte_prefetch(vcpu, sp, sptep);
i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
pt_element_t gpte;
unsigned pte_access;
gfn_t gfn;
pfn_t pfn;
bool dirty;
if (spte == sptep)
continue;
if (*spte != shadow_trap_nonpresent_pte)
continue;
gpte = gptep[i];
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
gfn = gpte_to_gfn(gpte);
dirty = is_dirty_gpte(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
(pte_access & ACC_WRITE_MASK) && dirty);
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
break;
}
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
pfn, true, true);
}
}
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int user_fault, int write_fault, int hlevel,
int *ptwrite, pfn_t pfn, bool map_writable,
bool prefault)
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp = NULL;
bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
int top_level;
unsigned direct_access;
struct kvm_shadow_walk_iterator it;
if (!is_present_gpte(gw->ptes[gw->level - 1]))
return NULL;
direct_access = gw->pt_access & gw->pte_access;
if (!dirty)
direct_access &= ~ACC_WRITE_MASK;
top_level = vcpu->arch.mmu.root_level;
if (top_level == PT32E_ROOT_LEVEL)
top_level = PT32_ROOT_LEVEL;
/*
* Verify that the top-level gpte is still there. Since the page
* is a root page, it is either write protected (and cannot be
* changed from now on) or it is invalid (in which case, we don't
* really care if it changes underneath us after this point).
*/
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
gfn_t table_gfn;
drop_large_spte(vcpu, it.sptep);
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access, it.sptep);
}
/*
* Verify that the gpte in the page we've just write
* protected is still there.
*/
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
goto out_gpte_changed;
if (sp)
link_shadow_page(it.sptep, sp);
}
for (;
shadow_walk_okay(&it) && it.level > hlevel;
shadow_walk_next(&it)) {
gfn_t direct_gfn;
validate_direct_spte(vcpu, it.sptep, direct_access);
drop_large_spte(vcpu, it.sptep);
if (is_shadow_present_pte(*it.sptep))
continue;
direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
true, direct_access, it.sptep);
link_shadow_page(it.sptep, sp);
}
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
user_fault, write_fault, dirty, ptwrite, it.level,
gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return it.sptep;
out_gpte_changed:
if (sp)
kvm_mmu_put_page(sp, it.sptep);
kvm_release_pfn_clean(pfn);
return NULL;
}
/*
* Page fault handler. There are several causes for a page fault:
* - there is no shadow pte for the guest pte
* - write access through a shadow pte marked read only so that we can set
* the dirty bit
* - write access to a shadow pte marked read only so we can update the page
* dirty bitmap, when userspace requests it
* - mmio access; in this case we will never install a present shadow pte
* - normal guest page fault due to the guest pte marked not present, not
* writable, or not executable
*
* Returns: 1 if we need to emulate the instruction, 0 otherwise, or
* a negative value on error.
*/
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
bool prefault)
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker;
u64 *sptep;
int write_pt = 0;
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
int force_pt_level;
unsigned long mmu_seq;
bool map_writable;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
/*
* Look up the guest pte for the faulting address.
*/
r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
/*
* The page is not mapped by the guest. Let the guest handle it.
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
if (!prefault) {
inject_page_fault(vcpu, &walker.fault);
/* reset fork detector */
vcpu->arch.last_pt_write_count = 0;
}
return 0;
}
if (walker.level >= PT_DIRECTORY_LEVEL)
force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
else
force_pt_level = 1;
if (!force_pt_level) {
level = min(walker.level, mapping_level(vcpu, walker.gfn));
walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
&map_writable))
return 0;
/* mmio */
if (is_error_pfn(pfn))
return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
level, &write_pt, pfn, map_writable, prefault);
(void)sptep;
pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
sptep, *sptep, write_pt);
if (!write_pt)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++vcpu->stat.pf_fixed;
trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
return write_pt;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return 0;
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
int offset, shift;
if (!sp->unsync)
break;
shift = PAGE_SHIFT -
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
offset = sp->role.quadrant << shift;
pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
if (is_large_pte(*sptep))
--vcpu->kvm->stat.lpages;
drop_spte(vcpu->kvm, sptep,
shadow_trap_nonpresent_pte);
need_flush = 1;
} else
__set_spte(sptep, shadow_trap_nonpresent_pte);
break;
}
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
atomic_inc(&vcpu->kvm->arch.invlpg_counter);
spin_unlock(&vcpu->kvm->mmu_lock);
if (pte_gpa == -1)
return;
if (mmu_topup_memory_caches(vcpu))
return;
kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
return gpa;
}
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
return gpa;
}
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
int i, j, offset, r;
pt_element_t pt[256 / sizeof(pt_element_t)];
gpa_t pte_gpa;
if (sp->role.direct
|| (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
nonpaging_prefetch_page(vcpu, sp);
return;
}
pte_gpa = gfn_to_gpa(sp->gfn);
if (PTTYPE == 32) {
offset = sp->role.quadrant << PT64_LEVEL_BITS;
pte_gpa += offset * sizeof(pt_element_t);
}
for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
for (j = 0; j < ARRAY_SIZE(pt); ++j)
if (r || is_present_gpte(pt[j]))
sp->spt[i+j] = shadow_trap_nonpresent_pte;
else
sp->spt[i+j] = shadow_notrap_nonpresent_pte;
}
}
/*
* Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn
* can't change unless all sptes pointing to it are nuked first.
*
* Note:
* We should flush all TLBs if a spte is dropped even though the guest is
* responsible for it: if we don't, kvm_mmu_notifier_invalidate_page and
* kvm_mmu_notifier_invalidate_range_start would see the mapped page as
* unused by the guest, skip the TLB flush, and leave the guest able to
* access the freed pages.
* We therefore increase kvm->tlbs_dirty to delay the TLB flush in this case.
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
int i, offset, nr_present;
bool host_writable;
gpa_t first_pte_gpa;
offset = nr_present = 0;
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
pt_element_t gpte;
gpa_t pte_gpa;
gfn_t gfn;
if (!is_shadow_present_pte(sp->spt[i]))
continue;
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
return -EINVAL;
gfn = gpte_to_gfn(gpte);
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i],
shadow_trap_nonpresent_pte);
vcpu->kvm->tlbs_dirty++;
continue;
}
nr_present++;
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
spte_to_pfn(sp->spt[i]), true, false,
host_writable);
}
return !nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
| /*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* MMU support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <yaniv@qumranet.com>
* Avi Kivity <avi@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
/*
* We need the mmu code to access both 32-bit and 64-bit guest ptes,
* so the code in this file is compiled twice, once per pte size.
*/
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define CMPXCHG cmpxchg
#else
#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
* The guest_walker structure emulates the behavior of the hardware page
* table walker.
*/
struct guest_walker {
int level;
gfn_t table_gfn[PT_MAX_FULL_LEVELS];
pt_element_t ptes[PT_MAX_FULL_LEVELS];
pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
unsigned pt_access;
unsigned pte_access;
gfn_t gfn;
struct x86_exception fault;
};
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t table_gfn, unsigned index,
pt_element_t orig_pte, pt_element_t new_pte)
{
pt_element_t ret;
pt_element_t *table;
struct page *page;
gpa_t gpa;
gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (gpa == UNMAPPED_GVA)
return -EFAULT;
page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
table = kmap_atomic(page, KM_USER0);
ret = CMPXCHG(&table[index], orig_pte, new_pte);
kunmap_atomic(table, KM_USER0);
kvm_release_page_dirty(page);
return (ret != orig_pte);
}
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
unsigned access;
access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
if (vcpu->arch.mmu.nx)
access &= ~(gpte >> PT64_NX_SHIFT);
#endif
return access;
}
/*
* Fetch a guest pte for a guest virtual address
*/
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t addr, u32 access)
{
pt_element_t pte;
pt_element_t __user *ptep_user;
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
bool eperm, present, rsvd_fault;
int offset, write_fault, user_fault, fetch_fault;
write_fault = access & PFERR_WRITE_MASK;
user_fault = access & PFERR_USER_MASK;
fetch_fault = access & PFERR_FETCH_MASK;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
walk:
present = true;
eperm = rsvd_fault = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
#if PTTYPE == 64
if (walker->level == PT32E_ROOT_LEVEL) {
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
if (!is_present_gpte(pte)) {
present = false;
goto error;
}
--walker->level;
}
#endif
ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
(mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
pt_access = ACC_ALL;
for (;;) {
gfn_t real_gfn;
unsigned long host_addr;
index = PT_INDEX(addr, walker->level);
table_gfn = gpte_to_gfn(pte);
offset = index * sizeof(pt_element_t);
pte_gpa = gfn_to_gpa(table_gfn) + offset;
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (unlikely(real_gfn == UNMAPPED_GVA)) {
present = false;
break;
}
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
if (unlikely(kvm_is_error_hva(host_addr))) {
present = false;
break;
}
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
present = false;
break;
}
trace_kvm_mmu_paging_element(pte, walker->level);
if (unlikely(!is_present_gpte(pte))) {
present = false;
break;
}
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
rsvd_fault = true;
break;
}
if (unlikely(write_fault && !is_writable_pte(pte)
&& (user_fault || is_write_protection(vcpu))))
eperm = true;
if (unlikely(user_fault && !(pte & PT_USER_MASK)))
eperm = true;
#if PTTYPE == 64
if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
eperm = true;
#endif
if (!eperm && !rsvd_fault
&& unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
index, pte, pte|PT_ACCESSED_MASK);
if (ret < 0) {
present = false;
break;
} else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
}
pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
walker->ptes[walker->level - 1] = pte;
if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
((walker->level == PT_DIRECTORY_LEVEL) &&
is_large_pte(pte) &&
(PTTYPE == 64 || is_pse(vcpu))) ||
((walker->level == PT_PDPE_LEVEL) &&
is_large_pte(pte) &&
mmu->root_level == PT64_ROOT_LEVEL)) {
int lvl = walker->level;
gpa_t real_gpa;
gfn_t gfn;
u32 ac;
gfn = gpte_to_gfn_lvl(pte, lvl);
gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
if (PTTYPE == 32 &&
walker->level == PT_DIRECTORY_LEVEL &&
is_cpuid_PSE36())
gfn += pse36_gfn_delta(pte);
ac = write_fault | fetch_fault | user_fault;
real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
ac);
if (real_gpa == UNMAPPED_GVA)
return 0;
walker->gfn = real_gpa >> PAGE_SHIFT;
break;
}
pt_access = pte_access;
--walker->level;
}
if (unlikely(!present || eperm || rsvd_fault))
goto error;
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
int ret;
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
pte|PT_DIRTY_MASK);
if (ret < 0) {
present = false;
goto error;
} else if (ret)
goto walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
walker->ptes[walker->level - 1] = pte;
}
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
__func__, (u64)pte, pte_access, pt_access);
return 1;
error:
walker->fault.vector = PF_VECTOR;
walker->fault.error_code_valid = true;
walker->fault.error_code = 0;
if (present)
walker->fault.error_code |= PFERR_PRESENT_MASK;
walker->fault.error_code |= write_fault | user_fault;
if (fetch_fault && mmu->nx)
walker->fault.error_code |= PFERR_FETCH_MASK;
if (rsvd_fault)
walker->fault.error_code |= PFERR_RSVD_MASK;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
trace_kvm_mmu_walker_error(walker->fault.error_code);
return 0;
}
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
access);
}
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr,
u32 access)
{
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
addr, access);
}
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
pt_element_t gpte)
{
u64 nonpresent = shadow_trap_nonpresent_pte;
if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
if (!is_present_gpte(gpte)) {
if (!sp->unsync)
nonpresent = shadow_notrap_nonpresent_pte;
goto no_present;
}
if (!(gpte & PT_ACCESSED_MASK))
goto no_present;
return false;
no_present:
drop_spte(vcpu->kvm, spte, nonpresent);
return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte)
{
pt_element_t gpte;
unsigned pte_access;
pfn_t pfn;
gpte = *(const pt_element_t *)pte;
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
return;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return;
}
/*
* We call mmu_set_spte() with host_writable = true because
* vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
gpte_to_gfn(gpte), pfn, true, true);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
struct guest_walker *gw, int level)
{
pt_element_t curr_pte;
gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
u64 mask;
int r, index;
if (level == PT_PAGE_TABLE_LEVEL) {
mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
base_gpa = pte_gpa & ~mask;
index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
curr_pte = gw->prefetch_ptes[index];
} else
r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
&curr_pte, sizeof(curr_pte));
return r || curr_pte != gw->ptes[level - 1];
}
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
u64 *sptep)
{
struct kvm_mmu_page *sp;
pt_element_t *gptep = gw->prefetch_ptes;
u64 *spte;
int i;
sp = page_header(__pa(sptep));
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return;
if (sp->role.direct)
return __direct_pte_prefetch(vcpu, sp, sptep);
i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
pt_element_t gpte;
unsigned pte_access;
gfn_t gfn;
pfn_t pfn;
bool dirty;
if (spte == sptep)
continue;
if (*spte != shadow_trap_nonpresent_pte)
continue;
gpte = gptep[i];
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
gfn = gpte_to_gfn(gpte);
dirty = is_dirty_gpte(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
(pte_access & ACC_WRITE_MASK) && dirty);
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
break;
}
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
pfn, true, true);
}
}
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int user_fault, int write_fault, int hlevel,
int *ptwrite, pfn_t pfn, bool map_writable,
bool prefault)
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp = NULL;
bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
int top_level;
unsigned direct_access;
struct kvm_shadow_walk_iterator it;
if (!is_present_gpte(gw->ptes[gw->level - 1]))
return NULL;
direct_access = gw->pt_access & gw->pte_access;
if (!dirty)
direct_access &= ~ACC_WRITE_MASK;
top_level = vcpu->arch.mmu.root_level;
if (top_level == PT32E_ROOT_LEVEL)
top_level = PT32_ROOT_LEVEL;
/*
* Verify that the top-level gpte is still there. Since the page
* is a root page, it is either write protected (and cannot be
* changed from now on) or it is invalid (in which case, we don't
* really care if it changes underneath us after this point).
*/
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
gfn_t table_gfn;
drop_large_spte(vcpu, it.sptep);
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access, it.sptep);
}
/*
* Verify that the gpte in the page we've just write
* protected is still there.
*/
if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
goto out_gpte_changed;
if (sp)
link_shadow_page(it.sptep, sp);
}
for (;
shadow_walk_okay(&it) && it.level > hlevel;
shadow_walk_next(&it)) {
gfn_t direct_gfn;
validate_direct_spte(vcpu, it.sptep, direct_access);
drop_large_spte(vcpu, it.sptep);
if (is_shadow_present_pte(*it.sptep))
continue;
direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
true, direct_access, it.sptep);
link_shadow_page(it.sptep, sp);
}
mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
user_fault, write_fault, dirty, ptwrite, it.level,
gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
return it.sptep;
out_gpte_changed:
if (sp)
kvm_mmu_put_page(sp, it.sptep);
kvm_release_pfn_clean(pfn);
return NULL;
}
/*
* Page fault handler. There are several causes for a page fault:
* - there is no shadow pte for the guest pte
* - write access through a shadow pte marked read only so that we can set
* the dirty bit
* - write access to a shadow pte marked read only so we can update the page
* dirty bitmap, when userspace requests it
* - mmio access; in this case we will never install a present shadow pte
* - normal guest page fault due to the guest pte marked not present, not
* writable, or not executable
*
* Returns: 1 if we need to emulate the instruction, 0 otherwise, or
* a negative value on error.
*/
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
bool prefault)
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker;
u64 *sptep;
int write_pt = 0;
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
int force_pt_level;
unsigned long mmu_seq;
bool map_writable;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
/*
* Look up the guest pte for the faulting address.
*/
r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
/*
* The page is not mapped by the guest. Let the guest handle it.
*/
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
if (!prefault) {
inject_page_fault(vcpu, &walker.fault);
/* reset fork detector */
vcpu->arch.last_pt_write_count = 0;
}
return 0;
}
if (walker.level >= PT_DIRECTORY_LEVEL)
force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
else
force_pt_level = 1;
if (!force_pt_level) {
level = min(walker.level, mapping_level(vcpu, walker.gfn));
walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
&map_writable))
return 0;
/* mmio */
if (is_error_pfn(pfn))
return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
kvm_mmu_free_some_pages(vcpu);
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
level, &write_pt, pfn, map_writable, prefault);
(void)sptep;
pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
sptep, *sptep, write_pt);
if (!write_pt)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++vcpu->stat.pf_fixed;
trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
return write_pt;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
return 0;
}
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
gpa_t pte_gpa = -1;
int level;
u64 *sptep;
int need_flush = 0;
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
sptep = iterator.sptep;
sp = page_header(__pa(sptep));
if (is_last_spte(*sptep, level)) {
int offset, shift;
if (!sp->unsync)
break;
shift = PAGE_SHIFT -
(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
offset = sp->role.quadrant << shift;
pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (is_shadow_present_pte(*sptep)) {
if (is_large_pte(*sptep))
--vcpu->kvm->stat.lpages;
drop_spte(vcpu->kvm, sptep,
shadow_trap_nonpresent_pte);
need_flush = 1;
} else
__set_spte(sptep, shadow_trap_nonpresent_pte);
break;
}
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
break;
}
if (need_flush)
kvm_flush_remote_tlbs(vcpu->kvm);
atomic_inc(&vcpu->kvm->arch.invlpg_counter);
spin_unlock(&vcpu->kvm->mmu_lock);
if (pte_gpa == -1)
return;
if (mmu_topup_memory_caches(vcpu))
return;
kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
return gpa;
}
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
gpa |= vaddr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
return gpa;
}
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
int i, j, offset, r;
pt_element_t pt[256 / sizeof(pt_element_t)];
gpa_t pte_gpa;
if (sp->role.direct
|| (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
nonpaging_prefetch_page(vcpu, sp);
return;
}
pte_gpa = gfn_to_gpa(sp->gfn);
if (PTTYPE == 32) {
offset = sp->role.quadrant << PT64_LEVEL_BITS;
pte_gpa += offset * sizeof(pt_element_t);
}
for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
for (j = 0; j < ARRAY_SIZE(pt); ++j)
if (r || is_present_gpte(pt[j]))
sp->spt[i+j] = shadow_trap_nonpresent_pte;
else
sp->spt[i+j] = shadow_notrap_nonpresent_pte;
}
}
/*
* Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn
* can't change unless all sptes pointing to it are nuked first.
*
* Note:
* We should flush all TLBs if a spte is dropped even though the guest is
* responsible for it: if we don't, kvm_mmu_notifier_invalidate_page and
* kvm_mmu_notifier_invalidate_range_start would see the mapped page as
* unused by the guest, skip the TLB flush, and leave the guest able to
* access the freed pages.
* We therefore increase kvm->tlbs_dirty to delay the TLB flush in this case.
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
int i, offset, nr_present;
bool host_writable;
gpa_t first_pte_gpa;
offset = nr_present = 0;
/* direct kvm_mmu_page can not be unsync. */
BUG_ON(sp->role.direct);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
unsigned pte_access;
pt_element_t gpte;
gpa_t pte_gpa;
gfn_t gfn;
if (!is_shadow_present_pte(sp->spt[i]))
continue;
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
sizeof(pt_element_t)))
return -EINVAL;
gfn = gpte_to_gfn(gpte);
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i],
shadow_trap_nonpresent_pte);
vcpu->kvm->tlbs_dirty++;
continue;
}
nr_present++;
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
spte_to_pfn(sp->spt[i]), true, false,
host_writable);
}
return !nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
|
1,780,343,330,315 | /*
* AppArmor security module
*
* This file contains AppArmor LSM hooks.
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/security.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/sock.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/capability.h"
#include "include/context.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/procattr.h"
/* Flag indicating whether initialization completed */
int apparmor_initialized __initdata;
/*
* LSM hook functions
*/
/*
* free the associated aa_task_cxt and put its profiles
*/
static void apparmor_cred_free(struct cred *cred)
{
aa_free_task_context(cred->security);
cred->security = NULL;
}
/*
* allocate the apparmor part of blank credentials
*/
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
/* freed by apparmor_cred_free */
struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
if (!cxt)
return -ENOMEM;
cred->security = cxt;
return 0;
}
/*
* prepare new aa_task_cxt for modification by prepare_cred block
*/
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
/* freed by apparmor_cred_free */
struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
if (!cxt)
return -ENOMEM;
aa_dup_task_context(cxt, old->security);
new->security = cxt;
return 0;
}
/*
* transfer the apparmor data to a blank set of creds
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
const struct aa_task_cxt *old_cxt = old->security;
struct aa_task_cxt *new_cxt = new->security;
aa_dup_task_context(new_cxt, old_cxt);
}
static int apparmor_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int error = cap_ptrace_access_check(child, mode);
if (error)
return error;
return aa_ptrace(current, child, mode);
}
static int apparmor_ptrace_traceme(struct task_struct *parent)
{
int error = cap_ptrace_traceme(parent);
if (error)
return error;
return aa_ptrace(parent, current, PTRACE_MODE_ATTACH);
}
/* Derived from security/commoncap.c:cap_capget */
static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
struct aa_profile *profile;
const struct cred *cred;
rcu_read_lock();
cred = __task_cred(target);
profile = aa_cred_profile(cred);
*effective = cred->cap_effective;
*inheritable = cred->cap_inheritable;
*permitted = cred->cap_permitted;
if (!unconfined(profile)) {
*effective = cap_intersect(*effective, profile->caps.allow);
*permitted = cap_intersect(*permitted, profile->caps.allow);
}
rcu_read_unlock();
return 0;
}
static int apparmor_capable(struct task_struct *task, const struct cred *cred,
struct user_namespace *ns, int cap, int audit)
{
struct aa_profile *profile;
/* cap_capable returns 0 on success, else -EPERM */
int error = cap_capable(task, cred, ns, cap, audit);
if (!error) {
profile = aa_cred_profile(cred);
if (!unconfined(profile))
error = aa_capable(task, profile, cap, audit);
}
return error;
}
/**
* common_perm - basic common permission check wrapper fn for paths
* @op: operation being checked
* @path: path to check permission of (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm(int op, struct path *path, u32 mask,
struct path_cond *cond)
{
struct aa_profile *profile;
int error = 0;
profile = __aa_current_profile();
if (!unconfined(profile))
error = aa_path_perm(op, profile, path, 0, mask, cond);
return error;
}
/**
* common_perm_dir_dentry - common permission wrapper when path is dir, dentry
* @op: operation being checked
* @dir: directory of the dentry (NOT NULL)
* @dentry: dentry to check (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_dir_dentry(int op, struct path *dir,
struct dentry *dentry, u32 mask,
struct path_cond *cond)
{
struct path path = { dir->mnt, dentry };
return common_perm(op, &path, mask, cond);
}
/**
* common_perm_mnt_dentry - common permission wrapper when mnt, dentry
* @op: operation being checked
* @mnt: mount point of dentry (NOT NULL)
* @dentry: dentry to check (NOT NULL)
* @mask: requested permissions mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
struct dentry *dentry, u32 mask)
{
struct path path = { mnt, dentry };
struct path_cond cond = { dentry->d_inode->i_uid,
dentry->d_inode->i_mode
};
return common_perm(op, &path, mask, &cond);
}
/**
* common_perm_rm - common permission wrapper for operations doing rm
* @op: operation being checked
* @dir: directory that the dentry is in (NOT NULL)
* @dentry: dentry being rm'd (NOT NULL)
* @mask: requested permission mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_rm(int op, struct path *dir,
struct dentry *dentry, u32 mask)
{
struct inode *inode = dentry->d_inode;
struct path_cond cond = { };
if (!inode || !dir->mnt || !mediated_filesystem(inode))
return 0;
cond.uid = inode->i_uid;
cond.mode = inode->i_mode;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
/**
* common_perm_create - common permission wrapper for operations doing create
* @op: operation being checked
* @dir: directory that dentry will be created in (NOT NULL)
* @dentry: dentry to create (NOT NULL)
* @mask: request permission mask
* @mode: created file mode
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
u32 mask, umode_t mode)
{
struct path_cond cond = { current_fsuid(), mode };
if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
return 0;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
int mode)
{
return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
S_IFDIR);
}
static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
int mode, unsigned int dev)
{
return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
}
static int apparmor_path_truncate(struct path *path)
{
struct path_cond cond = { path->dentry->d_inode->i_uid,
path->dentry->d_inode->i_mode
};
if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
return 0;
return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
&cond);
}
static int apparmor_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
S_IFLNK);
}
static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(old_dentry->d_inode))
return 0;
profile = aa_current_profile();
if (!unconfined(profile))
error = aa_path_link(profile, old_dentry, new_dir, new_dentry);
return error;
}
static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry)
{
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(old_dentry->d_inode))
return 0;
profile = aa_current_profile();
if (!unconfined(profile)) {
struct path old_path = { old_dir->mnt, old_dentry };
struct path new_path = { new_dir->mnt, new_dentry };
struct path_cond cond = { old_dentry->d_inode->i_uid,
old_dentry->d_inode->i_mode
};
error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
MAY_READ | AA_MAY_META_READ | MAY_WRITE |
AA_MAY_META_WRITE | AA_MAY_DELETE,
&cond);
if (!error)
error = aa_path_perm(OP_RENAME_DEST, profile, &new_path,
0, MAY_WRITE | AA_MAY_META_WRITE |
AA_MAY_CREATE, &cond);
}
return error;
}
static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
mode_t mode)
{
if (!mediated_filesystem(dentry->d_inode))
return 0;
return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD);
}
static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
{
struct path_cond cond = { path->dentry->d_inode->i_uid,
path->dentry->d_inode->i_mode
};
if (!mediated_filesystem(path->dentry->d_inode))
return 0;
return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
}
static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
if (!mediated_filesystem(dentry->d_inode))
return 0;
return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
AA_MAY_META_READ);
}
static int apparmor_dentry_open(struct file *file, const struct cred *cred)
{
struct aa_file_cxt *fcxt = file->f_security;
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(file->f_path.dentry->d_inode))
return 0;
/* If in exec, permission is handled by bprm hooks.
* Cache permissions granted by the previous exec check, with
* implicit read and executable mmap which are required to
* actually execute the image.
*/
if (current->in_execve) {
fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
return 0;
}
profile = aa_cred_profile(cred);
if (!unconfined(profile)) {
struct inode *inode = file->f_path.dentry->d_inode;
struct path_cond cond = { inode->i_uid, inode->i_mode };
error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
aa_map_file_to_perms(file), &cond);
/* todo cache full allowed permissions set and state */
fcxt->allow = aa_map_file_to_perms(file);
}
return error;
}
static int apparmor_file_alloc_security(struct file *file)
{
/* freed by apparmor_file_free_security */
file->f_security = aa_alloc_file_context(GFP_KERNEL);
if (!file->f_security)
return -ENOMEM;
return 0;
}
static void apparmor_file_free_security(struct file *file)
{
struct aa_file_cxt *cxt = file->f_security;
aa_free_file_context(cxt);
}
static int common_file_perm(int op, struct file *file, u32 mask)
{
struct aa_file_cxt *fcxt = file->f_security;
struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred);
int error = 0;
BUG_ON(!fprofile);
if (!file->f_path.mnt ||
!mediated_filesystem(file->f_path.dentry->d_inode))
return 0;
profile = __aa_current_profile();
/* Revalidate access if the task is confined and either the cached
* cred doesn't match the current profile or the request is for more
* permissions than were granted.
*
* Note: the test for !unconfined(fprofile) is to handle file
* delegation from unconfined tasks.
*/
if (!unconfined(profile) && !unconfined(fprofile) &&
((fprofile != profile) || (mask & ~fcxt->allow)))
error = aa_file_perm(op, profile, file, mask);
return error;
}
static int apparmor_file_permission(struct file *file, int mask)
{
return common_file_perm(OP_FPERM, file, mask);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
{
u32 mask = AA_MAY_LOCK;
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
return common_file_perm(OP_FLOCK, file, mask);
}
static int common_mmap(int op, struct file *file, unsigned long prot,
unsigned long flags)
{
struct dentry *dentry;
int mask = 0;
if (!file || !file->f_security)
return 0;
if (prot & PROT_READ)
mask |= MAY_READ;
/*
* Private mappings don't require write perms since they don't
* write back to the files
*/
if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
mask |= MAY_WRITE;
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
dentry = file->f_path.dentry;
return common_file_perm(op, file, mask);
}
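/* For example (illustrative only): prot = PROT_READ | PROT_WRITE on a
* MAP_PRIVATE mapping yields mask = MAY_READ only, since the private
* copy never writes back to the file; the same prot on a shared
* mapping yields MAY_READ | MAY_WRITE.
*/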
static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags,
unsigned long addr, unsigned long addr_only)
{
int rc = 0;
/* do DAC check */
rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
if (rc || addr_only)
return rc;
return common_mmap(OP_FMMAP, file, prot, flags);
}
static int apparmor_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
{
return common_mmap(OP_FMPROT, vma->vm_file, prot,
!(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
}
static int apparmor_getprocattr(struct task_struct *task, char *name,
char **value)
{
int error = -ENOENT;
struct aa_profile *profile;
/* released below */
const struct cred *cred = get_task_cred(task);
struct aa_task_cxt *cxt = cred->security;
profile = aa_cred_profile(cred);
if (strcmp(name, "current") == 0)
error = aa_getprocattr(aa_newest_version(cxt->profile),
value);
else if (strcmp(name, "prev") == 0 && cxt->previous)
error = aa_getprocattr(aa_newest_version(cxt->previous),
value);
else if (strcmp(name, "exec") == 0 && cxt->onexec)
error = aa_getprocattr(aa_newest_version(cxt->onexec),
value);
else
error = -EINVAL;
put_cred(cred);
return error;
}
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer; AppArmor requires that the
 * buffer either is null terminated or has size <= PAGE_SIZE - 1,
 * so that AppArmor can null terminate it
 */
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else if (strcmp(command, "permipc") == 0) {
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
} else {
/* only support the "current" and "exec" process attributes */
return -EINVAL;
}
if (!error)
error = size;
return error;
}
static int apparmor_task_setrlimit(struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
struct aa_profile *profile = aa_current_profile();
int error = 0;
if (!unconfined(profile))
error = aa_task_setrlimit(profile, task, resource, new_rlim);
return error;
}
static struct security_operations apparmor_ops = {
.name = "apparmor",
.ptrace_access_check = apparmor_ptrace_access_check,
.ptrace_traceme = apparmor_ptrace_traceme,
.capget = apparmor_capget,
.capable = apparmor_capable,
.path_link = apparmor_path_link,
.path_unlink = apparmor_path_unlink,
.path_symlink = apparmor_path_symlink,
.path_mkdir = apparmor_path_mkdir,
.path_rmdir = apparmor_path_rmdir,
.path_mknod = apparmor_path_mknod,
.path_rename = apparmor_path_rename,
.path_chmod = apparmor_path_chmod,
.path_chown = apparmor_path_chown,
.path_truncate = apparmor_path_truncate,
.dentry_open = apparmor_dentry_open,
.inode_getattr = apparmor_inode_getattr,
.file_permission = apparmor_file_permission,
.file_alloc_security = apparmor_file_alloc_security,
.file_free_security = apparmor_file_free_security,
.file_mmap = apparmor_file_mmap,
.file_mprotect = apparmor_file_mprotect,
.file_lock = apparmor_file_lock,
.getprocattr = apparmor_getprocattr,
.setprocattr = apparmor_setprocattr,
.cred_alloc_blank = apparmor_cred_alloc_blank,
.cred_free = apparmor_cred_free,
.cred_prepare = apparmor_cred_prepare,
.cred_transfer = apparmor_cred_transfer,
.bprm_set_creds = apparmor_bprm_set_creds,
.bprm_committing_creds = apparmor_bprm_committing_creds,
.bprm_committed_creds = apparmor_bprm_committed_creds,
.bprm_secureexec = apparmor_bprm_secureexec,
.task_setrlimit = apparmor_task_setrlimit,
};
/*
* AppArmor sysfs module parameters
*/
static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aabool = {
.set = param_set_aabool,
.get = param_get_aabool
};
static int param_set_aauint(const char *val, const struct kernel_param *kp);
static int param_get_aauint(char *buffer, const struct kernel_param *kp);
#define param_check_aauint(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aauint = {
.set = param_set_aauint,
.get = param_get_aauint
};
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aalockpolicy = {
.set = param_set_aalockpolicy,
.get = param_get_aalockpolicy
};
static int param_set_audit(const char *val, struct kernel_param *kp);
static int param_get_audit(char *buffer, struct kernel_param *kp);
static int param_set_mode(const char *val, struct kernel_param *kp);
static int param_get_mode(char *buffer, struct kernel_param *kp);
/* Flag values, also controllable via /sys/module/apparmor/parameters
* We define special types as we want to do additional mediation.
*/
/* AppArmor global enforcement switch - complain, enforce, kill */
enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
module_param_call(mode, param_set_mode, param_get_mode,
&aa_g_profile_mode, S_IRUSR | S_IWUSR);
/* Debug mode */
int aa_g_debug;
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
/* Audit mode */
enum audit_mode aa_g_audit;
module_param_call(audit, param_set_audit, param_get_audit,
&aa_g_audit, S_IRUSR | S_IWUSR);
/* Determines if audit header is included in audited messages. This
* provides more context if the audit daemon is not running
*/
int aa_g_audit_header = 1;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
/* lock out loading/removal of policy
 * TODO: add boot-time loading of policy, which would then be the
 * only way to load policy when lock_policy is set
 */
int aa_g_lock_policy;
module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
S_IRUSR | S_IWUSR);
/* Syscall logging mode */
int aa_g_logsyscall;
module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
/* Determines how paranoid loading of policy is and how much verification
* on the loaded policy is done.
*/
int aa_g_paranoid_load = 1;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
S_IRUSR | S_IWUSR);
/* Boot time disable flag */
static unsigned int apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR);
static int __init apparmor_enabled_setup(char *str)
{
unsigned long enabled;
int error = strict_strtoul(str, 0, &enabled);
if (!error)
apparmor_enabled = enabled ? 1 : 0;
return 1;
}
__setup("apparmor=", apparmor_enabled_setup);
/* set global flag turning off the ability to load policy */
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (aa_g_lock_policy)
return -EACCES;
return param_set_bool(val, kp);
}
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_set_bool(val, kp);
}
static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aauint(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_set_uint(val, kp);
}
static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_uint(buffer, kp);
}
static int param_get_audit(char *buffer, struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}
static int param_set_audit(const char *val, struct kernel_param *kp)
{
int i;
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
for (i = 0; i < AUDIT_MAX_INDEX; i++) {
if (strcmp(val, audit_mode_names[i]) == 0) {
aa_g_audit = i;
return 0;
}
}
return -EINVAL;
}
static int param_get_mode(char *buffer, struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]);
}
static int param_set_mode(const char *val, struct kernel_param *kp)
{
int i;
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) {
if (strcmp(val, profile_mode_names[i]) == 0) {
aa_g_profile_mode = i;
return 0;
}
}
return -EINVAL;
}
/*
* AppArmor init functions
*/
/**
* set_init_cxt - set a task context and profile on the first task.
*
 * TODO: allow setting a profile other than unconfined
*/
static int __init set_init_cxt(void)
{
struct cred *cred = (struct cred *)current->real_cred;
struct aa_task_cxt *cxt;
cxt = aa_alloc_task_context(GFP_KERNEL);
if (!cxt)
return -ENOMEM;
cxt->profile = aa_get_profile(root_ns->unconfined);
cred->security = cxt;
return 0;
}
static int __init apparmor_init(void)
{
int error;
if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) {
aa_info_message("AppArmor disabled by boot time parameter");
apparmor_enabled = 0;
return 0;
}
error = aa_alloc_root_ns();
if (error) {
AA_ERROR("Unable to allocate default profile namespace\n");
goto alloc_out;
}
error = set_init_cxt();
if (error) {
AA_ERROR("Failed to set context on init task\n");
goto register_security_out;
}
error = register_security(&apparmor_ops);
if (error) {
AA_ERROR("Unable to register AppArmor\n");
goto set_init_cxt_out;
}
/* Report that AppArmor successfully initialized */
apparmor_initialized = 1;
if (aa_g_profile_mode == APPARMOR_COMPLAIN)
aa_info_message("AppArmor initialized: complain mode enabled");
else if (aa_g_profile_mode == APPARMOR_KILL)
aa_info_message("AppArmor initialized: kill mode enabled");
else
aa_info_message("AppArmor initialized");
return error;
set_init_cxt_out:
aa_free_task_context(current->real_cred->security);
register_security_out:
aa_free_root_ns();
alloc_out:
aa_destroy_aafs();
apparmor_enabled = 0;
return error;
}
security_initcall(apparmor_init);
| /*
* AppArmor security module
*
* This file contains AppArmor LSM hooks.
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/security.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/sock.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/capability.h"
#include "include/context.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/procattr.h"
/* Flag indicating whether initialization completed */
int apparmor_initialized __initdata;
/*
* LSM hook functions
*/
/*
* free the associated aa_task_cxt and put its profiles
*/
static void apparmor_cred_free(struct cred *cred)
{
aa_free_task_context(cred->security);
cred->security = NULL;
}
/*
* allocate the apparmor part of blank credentials
*/
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
/* freed by apparmor_cred_free */
struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
if (!cxt)
return -ENOMEM;
cred->security = cxt;
return 0;
}
/*
* prepare new aa_task_cxt for modification by prepare_cred block
*/
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
/* freed by apparmor_cred_free */
struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
if (!cxt)
return -ENOMEM;
aa_dup_task_context(cxt, old->security);
new->security = cxt;
return 0;
}
/*
* transfer the apparmor data to a blank set of creds
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
const struct aa_task_cxt *old_cxt = old->security;
struct aa_task_cxt *new_cxt = new->security;
aa_dup_task_context(new_cxt, old_cxt);
}
static int apparmor_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int error = cap_ptrace_access_check(child, mode);
if (error)
return error;
return aa_ptrace(current, child, mode);
}
static int apparmor_ptrace_traceme(struct task_struct *parent)
{
int error = cap_ptrace_traceme(parent);
if (error)
return error;
return aa_ptrace(parent, current, PTRACE_MODE_ATTACH);
}
/* Derived from security/commoncap.c:cap_capget */
static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
struct aa_profile *profile;
const struct cred *cred;
rcu_read_lock();
cred = __task_cred(target);
profile = aa_cred_profile(cred);
*effective = cred->cap_effective;
*inheritable = cred->cap_inheritable;
*permitted = cred->cap_permitted;
if (!unconfined(profile)) {
*effective = cap_intersect(*effective, profile->caps.allow);
*permitted = cap_intersect(*permitted, profile->caps.allow);
}
rcu_read_unlock();
return 0;
}
static int apparmor_capable(struct task_struct *task, const struct cred *cred,
struct user_namespace *ns, int cap, int audit)
{
struct aa_profile *profile;
/* cap_capable returns 0 on success, else -EPERM */
int error = cap_capable(task, cred, ns, cap, audit);
if (!error) {
profile = aa_cred_profile(cred);
if (!unconfined(profile))
error = aa_capable(task, profile, cap, audit);
}
return error;
}
/**
* common_perm - basic common permission check wrapper fn for paths
* @op: operation being checked
* @path: path to check permission of (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm(int op, struct path *path, u32 mask,
struct path_cond *cond)
{
struct aa_profile *profile;
int error = 0;
profile = __aa_current_profile();
if (!unconfined(profile))
error = aa_path_perm(op, profile, path, 0, mask, cond);
return error;
}
/**
* common_perm_dir_dentry - common permission wrapper when path is dir, dentry
* @op: operation being checked
* @dir: directory of the dentry (NOT NULL)
* @dentry: dentry to check (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_dir_dentry(int op, struct path *dir,
struct dentry *dentry, u32 mask,
struct path_cond *cond)
{
struct path path = { dir->mnt, dentry };
return common_perm(op, &path, mask, cond);
}
/**
* common_perm_mnt_dentry - common permission wrapper when mnt, dentry
* @op: operation being checked
* @mnt: mount point of dentry (NOT NULL)
* @dentry: dentry to check (NOT NULL)
* @mask: requested permissions mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
struct dentry *dentry, u32 mask)
{
struct path path = { mnt, dentry };
struct path_cond cond = { dentry->d_inode->i_uid,
dentry->d_inode->i_mode
};
return common_perm(op, &path, mask, &cond);
}
/**
* common_perm_rm - common permission wrapper for operations doing rm
* @op: operation being checked
* @dir: directory that the dentry is in (NOT NULL)
* @dentry: dentry being rm'd (NOT NULL)
* @mask: requested permission mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_rm(int op, struct path *dir,
struct dentry *dentry, u32 mask)
{
struct inode *inode = dentry->d_inode;
struct path_cond cond = { };
if (!inode || !dir->mnt || !mediated_filesystem(inode))
return 0;
cond.uid = inode->i_uid;
cond.mode = inode->i_mode;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
/**
* common_perm_create - common permission wrapper for operations doing create
* @op: operation being checked
* @dir: directory that dentry will be created in (NOT NULL)
* @dentry: dentry to create (NOT NULL)
* @mask: request permission mask
* @mode: created file mode
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
u32 mask, umode_t mode)
{
struct path_cond cond = { current_fsuid(), mode };
if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
return 0;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
int mode)
{
return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
S_IFDIR);
}
static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
int mode, unsigned int dev)
{
return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
}
static int apparmor_path_truncate(struct path *path)
{
struct path_cond cond = { path->dentry->d_inode->i_uid,
path->dentry->d_inode->i_mode
};
if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
return 0;
return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
&cond);
}
static int apparmor_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
S_IFLNK);
}
static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(old_dentry->d_inode))
return 0;
profile = aa_current_profile();
if (!unconfined(profile))
error = aa_path_link(profile, old_dentry, new_dir, new_dentry);
return error;
}
static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry)
{
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(old_dentry->d_inode))
return 0;
profile = aa_current_profile();
if (!unconfined(profile)) {
struct path old_path = { old_dir->mnt, old_dentry };
struct path new_path = { new_dir->mnt, new_dentry };
struct path_cond cond = { old_dentry->d_inode->i_uid,
old_dentry->d_inode->i_mode
};
error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
MAY_READ | AA_MAY_META_READ | MAY_WRITE |
AA_MAY_META_WRITE | AA_MAY_DELETE,
&cond);
if (!error)
error = aa_path_perm(OP_RENAME_DEST, profile, &new_path,
0, MAY_WRITE | AA_MAY_META_WRITE |
AA_MAY_CREATE, &cond);
}
return error;
}
static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
mode_t mode)
{
if (!mediated_filesystem(dentry->d_inode))
return 0;
return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD);
}
static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
{
struct path_cond cond = { path->dentry->d_inode->i_uid,
path->dentry->d_inode->i_mode
};
if (!mediated_filesystem(path->dentry->d_inode))
return 0;
return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
}
static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
if (!mediated_filesystem(dentry->d_inode))
return 0;
return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
AA_MAY_META_READ);
}
static int apparmor_dentry_open(struct file *file, const struct cred *cred)
{
struct aa_file_cxt *fcxt = file->f_security;
struct aa_profile *profile;
int error = 0;
if (!mediated_filesystem(file->f_path.dentry->d_inode))
return 0;
/* If in exec, permission is handled by bprm hooks.
* Cache permissions granted by the previous exec check, with
* implicit read and executable mmap which are required to
* actually execute the image.
*/
if (current->in_execve) {
fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
return 0;
}
profile = aa_cred_profile(cred);
if (!unconfined(profile)) {
struct inode *inode = file->f_path.dentry->d_inode;
struct path_cond cond = { inode->i_uid, inode->i_mode };
error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
aa_map_file_to_perms(file), &cond);
/* todo cache full allowed permissions set and state */
fcxt->allow = aa_map_file_to_perms(file);
}
return error;
}
static int apparmor_file_alloc_security(struct file *file)
{
/* freed by apparmor_file_free_security */
file->f_security = aa_alloc_file_context(GFP_KERNEL);
if (!file->f_security)
return -ENOMEM;
return 0;
}
static void apparmor_file_free_security(struct file *file)
{
struct aa_file_cxt *cxt = file->f_security;
aa_free_file_context(cxt);
}
static int common_file_perm(int op, struct file *file, u32 mask)
{
struct aa_file_cxt *fcxt = file->f_security;
struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred);
int error = 0;
BUG_ON(!fprofile);
if (!file->f_path.mnt ||
!mediated_filesystem(file->f_path.dentry->d_inode))
return 0;
profile = __aa_current_profile();
/* revalidate access if the task is confined and either the cached
 * cred doesn't match or the request is for more permissions than
 * were granted.
 *
 * Note: the test for !unconfined(fprofile) is to handle file
 * delegation from unconfined tasks
 */
if (!unconfined(profile) && !unconfined(fprofile) &&
((fprofile != profile) || (mask & ~fcxt->allow)))
error = aa_file_perm(op, profile, file, mask);
return error;
}
static int apparmor_file_permission(struct file *file, int mask)
{
return common_file_perm(OP_FPERM, file, mask);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
{
u32 mask = AA_MAY_LOCK;
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
return common_file_perm(OP_FLOCK, file, mask);
}
static int common_mmap(int op, struct file *file, unsigned long prot,
unsigned long flags)
{
struct dentry *dentry;
int mask = 0;
if (!file || !file->f_security)
return 0;
if (prot & PROT_READ)
mask |= MAY_READ;
/*
* Private mappings don't require write perms since they don't
* write back to the files
*/
if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
mask |= MAY_WRITE;
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
dentry = file->f_path.dentry;
return common_file_perm(op, file, mask);
}
static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags,
unsigned long addr, unsigned long addr_only)
{
int rc = 0;
/* do DAC check */
rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
if (rc || addr_only)
return rc;
return common_mmap(OP_FMMAP, file, prot, flags);
}
static int apparmor_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
{
return common_mmap(OP_FMPROT, vma->vm_file, prot,
!(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
}
static int apparmor_getprocattr(struct task_struct *task, char *name,
char **value)
{
int error = -ENOENT;
struct aa_profile *profile;
/* released below */
const struct cred *cred = get_task_cred(task);
struct aa_task_cxt *cxt = cred->security;
profile = aa_cred_profile(cred);
if (strcmp(name, "current") == 0)
error = aa_getprocattr(aa_newest_version(cxt->profile),
value);
else if (strcmp(name, "prev") == 0 && cxt->previous)
error = aa_getprocattr(aa_newest_version(cxt->previous),
value);
else if (strcmp(name, "exec") == 0 && cxt->onexec)
error = aa_getprocattr(aa_newest_version(cxt->onexec),
value);
else
error = -EINVAL;
put_cred(cred);
return error;
}
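/*
 * Read-side sketch of this interface: the hook above backs reads of
 * /proc/<pid>/attr/current (and "prev"/"exec" where set).  A minimal
 * reader, assuming an AppArmor-enabled kernel; unconfined tasks
 * typically read back "unconfined":
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char label[4096];
 *		FILE *f = fopen("/proc/self/attr/current", "r");
 *		if (!f)
 *			return 1;
 *		if (fgets(label, sizeof(label), f))
 *			printf("confinement: %s", label);
 *		fclose(f);
 *		return 0;
 *	}
 */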
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
char *command, *args = value;
size_t arg_size;
int error;
if (size == 0)
return -EINVAL;
/* args points to a PAGE_SIZE buffer; AppArmor requires that the
 * buffer either is null terminated or has size <= PAGE_SIZE - 1,
 * so that AppArmor can null terminate it
 */
if (args[size - 1] != '\0') {
if (size == PAGE_SIZE)
return -EINVAL;
args[size] = '\0';
}
/* task can only write its own attributes */
if (current != task)
return -EACCES;
args = value;
args = strim(args);
command = strsep(&args, " ");
if (!args)
return -EINVAL;
args = skip_spaces(args);
if (!*args)
return -EINVAL;
arg_size = size - (args - (char *) value);
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
!AA_DO_TEST);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_DO_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
!AA_DO_TEST);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
} else if (strcmp(command, "permipc") == 0) {
error = aa_setprocattr_permipc(args);
} else {
struct common_audit_data sa;
COMMON_AUDIT_DATA_INIT(&sa, NONE);
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED,
__aa_current_profile(), GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {
error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
!AA_DO_TEST);
} else {
/* only support the "current" and "exec" process attributes */
return -EINVAL;
}
if (!error)
error = size;
return error;
}
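/*
 * Write-side sketch: the commands parsed above ("changehat",
 * "changeprofile", ...) arrive as writes to /proc/<pid>/attr/current
 * (or attr/exec for the onexec case).  A hedged example; the profile
 * name is hypothetical and the transition must be permitted by loaded
 * policy for the write to succeed:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char cmd[] = "changeprofile /usr/bin/example";
 *		int fd = open("/proc/self/attr/current", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		// On success the kernel returns the full size written,
 *		// matching the "error = size" convention above.
 *		ssize_t n = write(fd, cmd, strlen(cmd));
 *		close(fd);
 *		return n == (ssize_t)strlen(cmd) ? 0 : 1;
 *	}
 */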
static int apparmor_task_setrlimit(struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
struct aa_profile *profile = aa_current_profile();
int error = 0;
if (!unconfined(profile))
error = aa_task_setrlimit(profile, task, resource, new_rlim);
return error;
}
static struct security_operations apparmor_ops = {
.name = "apparmor",
.ptrace_access_check = apparmor_ptrace_access_check,
.ptrace_traceme = apparmor_ptrace_traceme,
.capget = apparmor_capget,
.capable = apparmor_capable,
.path_link = apparmor_path_link,
.path_unlink = apparmor_path_unlink,
.path_symlink = apparmor_path_symlink,
.path_mkdir = apparmor_path_mkdir,
.path_rmdir = apparmor_path_rmdir,
.path_mknod = apparmor_path_mknod,
.path_rename = apparmor_path_rename,
.path_chmod = apparmor_path_chmod,
.path_chown = apparmor_path_chown,
.path_truncate = apparmor_path_truncate,
.dentry_open = apparmor_dentry_open,
.inode_getattr = apparmor_inode_getattr,
.file_permission = apparmor_file_permission,
.file_alloc_security = apparmor_file_alloc_security,
.file_free_security = apparmor_file_free_security,
.file_mmap = apparmor_file_mmap,
.file_mprotect = apparmor_file_mprotect,
.file_lock = apparmor_file_lock,
.getprocattr = apparmor_getprocattr,
.setprocattr = apparmor_setprocattr,
.cred_alloc_blank = apparmor_cred_alloc_blank,
.cred_free = apparmor_cred_free,
.cred_prepare = apparmor_cred_prepare,
.cred_transfer = apparmor_cred_transfer,
.bprm_set_creds = apparmor_bprm_set_creds,
.bprm_committing_creds = apparmor_bprm_committing_creds,
.bprm_committed_creds = apparmor_bprm_committed_creds,
.bprm_secureexec = apparmor_bprm_secureexec,
.task_setrlimit = apparmor_task_setrlimit,
};
/*
* AppArmor sysfs module parameters
*/
static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aabool = {
.set = param_set_aabool,
.get = param_get_aabool
};
static int param_set_aauint(const char *val, const struct kernel_param *kp);
static int param_get_aauint(char *buffer, const struct kernel_param *kp);
#define param_check_aauint(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aauint = {
.set = param_set_aauint,
.get = param_get_aauint
};
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy(name, p) __param_check(name, p, int)
static struct kernel_param_ops param_ops_aalockpolicy = {
.set = param_set_aalockpolicy,
.get = param_get_aalockpolicy
};
static int param_set_audit(const char *val, struct kernel_param *kp);
static int param_get_audit(char *buffer, struct kernel_param *kp);
static int param_set_mode(const char *val, struct kernel_param *kp);
static int param_get_mode(char *buffer, struct kernel_param *kp);
/* Flag values, also controllable via /sys/module/apparmor/parameters
* We define special types as we want to do additional mediation.
*/
/* AppArmor global enforcement switch - complain, enforce, kill */
enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
module_param_call(mode, param_set_mode, param_get_mode,
&aa_g_profile_mode, S_IRUSR | S_IWUSR);
/* Debug mode */
int aa_g_debug;
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
/* Audit mode */
enum audit_mode aa_g_audit;
module_param_call(audit, param_set_audit, param_get_audit,
&aa_g_audit, S_IRUSR | S_IWUSR);
/* Determines if audit header is included in audited messages. This
* provides more context if the audit daemon is not running
*/
int aa_g_audit_header = 1;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
/* lock out loading/removal of policy
 * TODO: add boot-time loading of policy, which would then be the
 * only way to load policy when lock_policy is set
 */
int aa_g_lock_policy;
module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
S_IRUSR | S_IWUSR);
/* Syscall logging mode */
int aa_g_logsyscall;
module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
/* Determines how paranoid loading of policy is and how much verification
* on the loaded policy is done.
*/
int aa_g_paranoid_load = 1;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
S_IRUSR | S_IWUSR);
/* Boot time disable flag */
static unsigned int apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR);
static int __init apparmor_enabled_setup(char *str)
{
unsigned long enabled;
int error = strict_strtoul(str, 0, &enabled);
if (!error)
apparmor_enabled = enabled ? 1 : 0;
return 1;
}
__setup("apparmor=", apparmor_enabled_setup);
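/*
 * Usage note: this consumes the kernel command line, so booting with
 * e.g. "apparmor=0" disables AppArmor even when it is built in;
 * strict_strtoul() accepts any numeric spelling ("0", "0x0", ...),
 * and any non-zero value enables.
 */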
/* set global flag turning off the ability to load policy */
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (aa_g_lock_policy)
return -EACCES;
return param_set_bool(val, kp);
}
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_set_bool(val, kp);
}
static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aauint(const char *val, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_set_uint(val, kp);
}
static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
return param_get_uint(buffer, kp);
}
static int param_get_audit(char *buffer, struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}
static int param_set_audit(const char *val, struct kernel_param *kp)
{
int i;
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
for (i = 0; i < AUDIT_MAX_INDEX; i++) {
if (strcmp(val, audit_mode_names[i]) == 0) {
aa_g_audit = i;
return 0;
}
}
return -EINVAL;
}
static int param_get_mode(char *buffer, struct kernel_param *kp)
{
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]);
}
static int param_set_mode(const char *val, struct kernel_param *kp)
{
int i;
if (!capable(CAP_MAC_ADMIN))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) {
if (strcmp(val, profile_mode_names[i]) == 0) {
aa_g_profile_mode = i;
return 0;
}
}
return -EINVAL;
}
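/*
 * The aabool/aauint/mode wrappers above all sit behind ordinary sysfs
 * parameter files, gated by CAP_MAC_ADMIN.  A sketch of driving them
 * from userspace (run as root; the path follows the standard
 * /sys/module/<name>/parameters layout):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	#define MODE_PARAM "/sys/module/apparmor/parameters/mode"
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		int fd = open(MODE_PARAM, O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *		ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *		close(fd);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("mode: %s", buf);	// e.g. "enforce"
 *		}
 *		// Switch to complain mode; param_set_mode() matches the
 *		// string against profile_mode_names[].
 *		fd = open(MODE_PARAM, O_WRONLY);
 *		if (fd < 0 || write(fd, "complain", 8) != 8)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */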
/*
* AppArmor init functions
*/
/**
* set_init_cxt - set a task context and profile on the first task.
*
 * TODO: allow setting a profile other than unconfined
*/
static int __init set_init_cxt(void)
{
struct cred *cred = (struct cred *)current->real_cred;
struct aa_task_cxt *cxt;
cxt = aa_alloc_task_context(GFP_KERNEL);
if (!cxt)
return -ENOMEM;
cxt->profile = aa_get_profile(root_ns->unconfined);
cred->security = cxt;
return 0;
}
static int __init apparmor_init(void)
{
int error;
if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) {
aa_info_message("AppArmor disabled by boot time parameter");
apparmor_enabled = 0;
return 0;
}
error = aa_alloc_root_ns();
if (error) {
AA_ERROR("Unable to allocate default profile namespace\n");
goto alloc_out;
}
error = set_init_cxt();
if (error) {
AA_ERROR("Failed to set context on init task\n");
goto register_security_out;
}
error = register_security(&apparmor_ops);
if (error) {
AA_ERROR("Unable to register AppArmor\n");
goto set_init_cxt_out;
}
/* Report that AppArmor successfully initialized */
apparmor_initialized = 1;
if (aa_g_profile_mode == APPARMOR_COMPLAIN)
aa_info_message("AppArmor initialized: complain mode enabled");
else if (aa_g_profile_mode == APPARMOR_KILL)
aa_info_message("AppArmor initialized: kill mode enabled");
else
aa_info_message("AppArmor initialized");
return error;
set_init_cxt_out:
aa_free_task_context(current->real_cred->security);
register_security_out:
aa_free_root_ns();
alloc_out:
aa_destroy_aafs();
apparmor_enabled = 0;
return error;
}
security_initcall(apparmor_init);
|
37,168,704,014,073 | #ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
#include <linux/types.h>
struct sockaddr_pkt {
unsigned short spkt_family;
unsigned char spkt_device[14];
__be16 spkt_protocol;
};
struct sockaddr_ll {
unsigned short sll_family;
__be16 sll_protocol;
int sll_ifindex;
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
unsigned char sll_addr[8];
};
/* Packet types */
#define PACKET_HOST 0 /* To us */
#define PACKET_BROADCAST 1 /* To all */
#define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */
#define PACKET_OUTGOING 4 /* Outgoing of any type */
/* These ones are invisible by user level */
#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
#define PACKET_FASTROUTE 6 /* Fastrouted frame */
/* Packet socket options */
#define PACKET_ADD_MEMBERSHIP 1
#define PACKET_DROP_MEMBERSHIP 2
#define PACKET_RECV_OUTPUT 3
/* Value 4 is still used by obsolete turbo-packet. */
#define PACKET_RX_RING 5
#define PACKET_STATISTICS 6
#define PACKET_COPY_THRESH 7
#define PACKET_AUXDATA 8
#define PACKET_ORIGDEV 9
#define PACKET_VERSION 10
#define PACKET_HDRLEN 11
#define PACKET_RESERVE 12
#define PACKET_TX_RING 13
#define PACKET_LOSS 14
#define PACKET_VNET_HDR 15
#define PACKET_TX_TIMESTAMP 16
#define PACKET_TIMESTAMP 17
struct tpacket_stats {
unsigned int tp_packets;
unsigned int tp_drops;
};
struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
};
/* Rx ring - header status */
#define TP_STATUS_KERNEL 0x0
#define TP_STATUS_USER 0x1
#define TP_STATUS_COPY 0x2
#define TP_STATUS_LOSING 0x4
#define TP_STATUS_CSUMNOTREADY 0x8
#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0x0
#define TP_STATUS_SEND_REQUEST 0x1
#define TP_STATUS_SENDING 0x2
#define TP_STATUS_WRONG_FORMAT 0x4
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
unsigned int tp_snaplen;
unsigned short tp_mac;
unsigned short tp_net;
unsigned int tp_sec;
unsigned int tp_usec;
};
#define TPACKET_ALIGNMENT 16
#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
struct tpacket2_hdr {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
};
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
};
/*
Frame structure:
- Start. Frame must be aligned to TPACKET_ALIGNMENT=16
- struct tpacket_hdr
- pad to TPACKET_ALIGNMENT=16
- struct sockaddr_ll
 - Gap, chosen so that packet data (Start+tp_net) aligns to TPACKET_ALIGNMENT=16
- Start+tp_mac: [ Optional MAC header ]
- Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
- Pad to align to TPACKET_ALIGNMENT=16
*/
struct tpacket_req {
unsigned int tp_block_size; /* Minimal size of contiguous block */
unsigned int tp_block_nr; /* Number of blocks */
unsigned int tp_frame_size; /* Size of frame */
unsigned int tp_frame_nr; /* Total number of frames */
};
struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[8];
};
#define PACKET_MR_MULTICAST 0
#define PACKET_MR_PROMISC 1
#define PACKET_MR_ALLMULTI 2
#define PACKET_MR_UNICAST 3
#endif
| #ifndef __LINUX_IF_PACKET_H
#define __LINUX_IF_PACKET_H
#include <linux/types.h>
struct sockaddr_pkt {
unsigned short spkt_family;
unsigned char spkt_device[14];
__be16 spkt_protocol;
};
struct sockaddr_ll {
unsigned short sll_family;
__be16 sll_protocol;
int sll_ifindex;
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
unsigned char sll_addr[8];
};
/* Packet types */
#define PACKET_HOST 0 /* To us */
#define PACKET_BROADCAST 1 /* To all */
#define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */
#define PACKET_OUTGOING 4 /* Outgoing of any type */
/* These ones are invisible by user level */
#define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
#define PACKET_FASTROUTE 6 /* Fastrouted frame */
/* Packet socket options */
#define PACKET_ADD_MEMBERSHIP 1
#define PACKET_DROP_MEMBERSHIP 2
#define PACKET_RECV_OUTPUT 3
/* Value 4 is still used by obsolete turbo-packet. */
#define PACKET_RX_RING 5
#define PACKET_STATISTICS 6
#define PACKET_COPY_THRESH 7
#define PACKET_AUXDATA 8
#define PACKET_ORIGDEV 9
#define PACKET_VERSION 10
#define PACKET_HDRLEN 11
#define PACKET_RESERVE 12
#define PACKET_TX_RING 13
#define PACKET_LOSS 14
#define PACKET_VNET_HDR 15
#define PACKET_TX_TIMESTAMP 16
#define PACKET_TIMESTAMP 17
struct tpacket_stats {
unsigned int tp_packets;
unsigned int tp_drops;
};
struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
__u16 tp_padding;
};
/* Rx ring - header status */
#define TP_STATUS_KERNEL 0x0
#define TP_STATUS_USER 0x1
#define TP_STATUS_COPY 0x2
#define TP_STATUS_LOSING 0x4
#define TP_STATUS_CSUMNOTREADY 0x8
#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0x0
#define TP_STATUS_SEND_REQUEST 0x1
#define TP_STATUS_SENDING 0x2
#define TP_STATUS_WRONG_FORMAT 0x4
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
unsigned int tp_snaplen;
unsigned short tp_mac;
unsigned short tp_net;
unsigned int tp_sec;
unsigned int tp_usec;
};
#define TPACKET_ALIGNMENT 16
#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
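/*
 * Worked example of the arithmetic, hedged on the ABI: on LP64,
 * sizeof(struct tpacket_hdr) is 32 (28 bytes of members padded to the
 * 8-byte alignment of tp_status), TPACKET_ALIGN(32) == 32, and
 * sizeof(struct sockaddr_ll) == 20, so TPACKET_HDRLEN == 52.  On ILP32
 * the header is 24 bytes and TPACKET_ALIGN(24) == 32, giving 52 again.
 */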
struct tpacket2_hdr {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
__u16 tp_padding;
};
#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
};
/*
Frame structure:
- Start. Frame must be aligned to TPACKET_ALIGNMENT=16
- struct tpacket_hdr
- pad to TPACKET_ALIGNMENT=16
- struct sockaddr_ll
 - Gap, chosen so that packet data (Start+tp_net) aligns to TPACKET_ALIGNMENT=16
- Start+tp_mac: [ Optional MAC header ]
- Start+tp_net: Packet data, aligned to TPACKET_ALIGNMENT=16.
- Pad to align to TPACKET_ALIGNMENT=16
*/
struct tpacket_req {
unsigned int tp_block_size; /* Minimal size of contiguous block */
unsigned int tp_block_nr; /* Number of blocks */
unsigned int tp_frame_size; /* Size of frame */
unsigned int tp_frame_nr; /* Total number of frames */
};
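/*
 * Ring setup sketch matching the frame layout described above:
 * userspace sizes a tpacket_req, installs it with PACKET_RX_RING, and
 * mmap()s the block area.  Minimal, assuming CAP_NET_RAW; real code
 * would also bind the socket and poll() the ring:
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int main(void)
 *	{
 *		struct tpacket_req req;
 *		int ver = TPACKET_V2;
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		if (fd < 0)
 *			return 1;
 *		setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *		memset(&req, 0, sizeof(req));
 *		req.tp_block_size = 4096;	// one page per block
 *		req.tp_frame_size = 2048;	// must divide tp_block_size
 *		req.tp_block_nr = 64;
 *		req.tp_frame_nr = req.tp_block_nr *
 *				  (req.tp_block_size / req.tp_frame_size);
 *		if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
 *			       &req, sizeof(req)) < 0)
 *			return 1;
 *		void *ring = mmap(NULL, (size_t)req.tp_block_size *
 *				  req.tp_block_nr, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, 0);
 *		// Each frame starts with a tpacket2_hdr; wait until
 *		// tp_status has TP_STATUS_USER before touching it.
 *		return ring == MAP_FAILED;
 *	}
 */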
struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[8];
};
#define PACKET_MR_MULTICAST 0
#define PACKET_MR_PROMISC 1
#define PACKET_MR_ALLMULTI 2
#define PACKET_MR_UNICAST 3
#endif
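/*
 * tpacket_auxdata is also delivered out of band on a plain recvmsg()
 * once PACKET_AUXDATA is enabled; tp_vlan_tci is only meaningful when
 * TP_STATUS_VLAN_VALID is set in tp_status.  A hedged sketch, assuming
 * CAP_NET_RAW:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int main(void)
 *	{
 *		char frame[2048];
 *		char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		int one = 1;
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		if (fd < 0)
 *			return 1;
 *		setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *		struct iovec iov = { frame, sizeof(frame) };
 *		struct msghdr msg = { 0 };
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return 1;
 *		for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *		     c = CMSG_NXTHDR(&msg, c)) {
 *			struct tpacket_auxdata aux;
 *			if (c->cmsg_level != SOL_PACKET ||
 *			    c->cmsg_type != PACKET_AUXDATA)
 *				continue;
 *			memcpy(&aux, CMSG_DATA(c), sizeof(aux));
 *			if (aux.tp_status & TP_STATUS_VLAN_VALID)
 *				printf("vlan tci: %u\n", aux.tp_vlan_tci);
 *		}
 *		return 0;
 *	}
 */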
|
265,688,133,345,464 | /*
* security/tomoyo/mount.c
*
* Copyright (C) 2005-2010 NTT DATA CORPORATION
*/
#include <linux/slab.h>
#include "common.h"
/* Keywords for mount restrictions. */
/* Allow calling 'mount --bind /source_dir /dest_dir' */
#define TOMOYO_MOUNT_BIND_KEYWORD "--bind"
/* Allow calling 'mount --move /old_dir /new_dir' */
#define TOMOYO_MOUNT_MOVE_KEYWORD "--move"
/* Allow calling 'mount -o remount /dir' */
#define TOMOYO_MOUNT_REMOUNT_KEYWORD "--remount"
/* Allow calling 'mount --make-unbindable /dir' */
#define TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD "--make-unbindable"
/* Allow calling 'mount --make-private /dir' */
#define TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD "--make-private"
/* Allow calling 'mount --make-slave /dir' */
#define TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD "--make-slave"
/* Allow calling 'mount --make-shared /dir' */
#define TOMOYO_MOUNT_MAKE_SHARED_KEYWORD "--make-shared"
/**
* tomoyo_audit_mount_log - Audit mount log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
{
const char *dev = r->param.mount.dev->name;
const char *dir = r->param.mount.dir->name;
const char *type = r->param.mount.type->name;
const unsigned long flags = r->param.mount.flags;
if (r->granted)
return 0;
if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD))
tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags);
else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD)
|| !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD))
tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir,
flags);
else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD))
tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags);
else
tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir,
flags);
return tomoyo_supervisor(r,
TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n",
tomoyo_pattern(r->param.mount.dev),
tomoyo_pattern(r->param.mount.dir), type,
flags);
}
static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_mount_acl *acl =
container_of(ptr, typeof(*acl), head);
return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) &&
tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) &&
tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) &&
(!r->param.mount.need_dev ||
tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name));
}
/**
* tomoyo_mount_acl - Check permission for mount() operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @dev_name: Name of device file.
* @dir: Pointer to "struct path".
* @type: Name of filesystem type.
* @flags: Mount options.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
struct path *dir, char *type, unsigned long flags)
{
struct path path;
struct file_system_type *fstype = NULL;
const char *requested_type = NULL;
const char *requested_dir_name = NULL;
const char *requested_dev_name = NULL;
struct tomoyo_path_info rtype;
struct tomoyo_path_info rdev;
struct tomoyo_path_info rdir;
int need_dev = 0;
int error = -ENOMEM;
/* Get fstype. */
requested_type = tomoyo_encode(type);
if (!requested_type)
goto out;
rtype.name = requested_type;
tomoyo_fill_path_info(&rtype);
/* Get mount point. */
requested_dir_name = tomoyo_realpath_from_path(dir);
if (!requested_dir_name) {
error = -ENOMEM;
goto out;
}
rdir.name = requested_dir_name;
tomoyo_fill_path_info(&rdir);
/* Compare fs name. */
if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) {
need_dev = -1; /* dev_name is a directory */
} else {
fstype = get_fs_type(type);
if (!fstype) {
error = -ENODEV;
goto out;
}
if (fstype->fs_flags & FS_REQUIRES_DEV)
/* dev_name is a block device file. */
need_dev = 1;
}
if (need_dev) {
/* Get mount point or device file. */
if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
error = -ENOENT;
goto out;
}
requested_dev_name = tomoyo_realpath_from_path(&path);
path_put(&path);
if (!requested_dev_name) {
error = -ENOENT;
goto out;
}
} else {
/* Map dev_name to "<NULL>" if no dev_name given. */
if (!dev_name)
dev_name = "<NULL>";
requested_dev_name = tomoyo_encode(dev_name);
if (!requested_dev_name) {
error = -ENOMEM;
goto out;
}
}
rdev.name = requested_dev_name;
tomoyo_fill_path_info(&rdev);
r->param_type = TOMOYO_TYPE_MOUNT_ACL;
r->param.mount.need_dev = need_dev;
r->param.mount.dev = &rdev;
r->param.mount.dir = &rdir;
r->param.mount.type = &rtype;
r->param.mount.flags = flags;
do {
tomoyo_check_acl(r, tomoyo_check_mount_acl);
error = tomoyo_audit_mount_log(r);
} while (error == TOMOYO_RETRY_REQUEST);
out:
kfree(requested_dev_name);
kfree(requested_dir_name);
if (fstype)
put_filesystem(fstype);
kfree(requested_type);
return error;
}
/**
* tomoyo_mount_permission - Check permission for mount() operation.
*
* @dev_name: Name of device file.
* @path: Pointer to "struct path".
* @type: Name of filesystem type. May be NULL.
* @flags: Mount options.
* @data_page: Optional data. May be NULL.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_mount_permission(char *dev_name, struct path *path, char *type,
unsigned long flags, void *data_page)
{
struct tomoyo_request_info r;
int error;
int idx;
if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
== TOMOYO_CONFIG_DISABLED)
return 0;
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
if (flags & MS_REMOUNT) {
type = TOMOYO_MOUNT_REMOUNT_KEYWORD;
flags &= ~MS_REMOUNT;
}
if (flags & MS_MOVE) {
type = TOMOYO_MOUNT_MOVE_KEYWORD;
flags &= ~MS_MOVE;
}
if (flags & MS_BIND) {
type = TOMOYO_MOUNT_BIND_KEYWORD;
flags &= ~MS_BIND;
}
if (flags & MS_UNBINDABLE) {
type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD;
flags &= ~MS_UNBINDABLE;
}
if (flags & MS_PRIVATE) {
type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD;
flags &= ~MS_PRIVATE;
}
if (flags & MS_SLAVE) {
type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD;
flags &= ~MS_SLAVE;
}
if (flags & MS_SHARED) {
type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD;
flags &= ~MS_SHARED;
}
if (!type)
type = "<NULL>";
idx = tomoyo_read_lock();
error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
tomoyo_read_unlock(idx);
return error;
}
static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_acl_head(&p1->head, &p2->head) &&
tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
tomoyo_same_number_union(&p1->flags, &p2->flags);
}
/**
* tomoyo_write_mount - Write "struct tomoyo_mount_acl" list.
*
* @data: String to parse.
* @domain: Pointer to "struct tomoyo_domain_info".
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain,
const bool is_delete)
{
struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
int error = is_delete ? -ENOENT : -ENOMEM;
char *w[4];
if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[3][0])
return -EINVAL;
if (!tomoyo_parse_name_union(w[0], &e.dev_name) ||
!tomoyo_parse_name_union(w[1], &e.dir_name) ||
!tomoyo_parse_name_union(w[2], &e.fs_type) ||
!tomoyo_parse_number_union(w[3], &e.flags))
goto out;
error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain,
tomoyo_same_mount_acl, NULL);
out:
tomoyo_put_name_union(&e.dev_name);
tomoyo_put_name_union(&e.dir_name);
tomoyo_put_name_union(&e.fs_type);
tomoyo_put_number_union(&e.flags);
return error;
}
| /*
* security/tomoyo/mount.c
*
* Copyright (C) 2005-2010 NTT DATA CORPORATION
*/
#include <linux/slab.h>
#include "common.h"
/* Keywords for mount restrictions. */
/* Allow calling 'mount --bind /source_dir /dest_dir' */
#define TOMOYO_MOUNT_BIND_KEYWORD "--bind"
/* Allow calling 'mount --move /old_dir /new_dir' */
#define TOMOYO_MOUNT_MOVE_KEYWORD "--move"
/* Allow calling 'mount -o remount /dir' */
#define TOMOYO_MOUNT_REMOUNT_KEYWORD "--remount"
/* Allow calling 'mount --make-unbindable /dir' */
#define TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD "--make-unbindable"
/* Allow calling 'mount --make-private /dir' */
#define TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD "--make-private"
/* Allow calling 'mount --make-slave /dir' */
#define TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD "--make-slave"
/* Allow calling 'mount --make-shared /dir' */
#define TOMOYO_MOUNT_MAKE_SHARED_KEYWORD "--make-shared"
/**
* tomoyo_audit_mount_log - Audit mount log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
{
const char *dev = r->param.mount.dev->name;
const char *dir = r->param.mount.dir->name;
const char *type = r->param.mount.type->name;
const unsigned long flags = r->param.mount.flags;
if (r->granted)
return 0;
if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD))
tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags);
else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD)
|| !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD))
tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir,
flags);
else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD))
tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags);
else
tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir,
flags);
return tomoyo_supervisor(r,
TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n",
tomoyo_pattern(r->param.mount.dev),
tomoyo_pattern(r->param.mount.dir), type,
flags);
}
static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_mount_acl *acl =
container_of(ptr, typeof(*acl), head);
return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) &&
tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) &&
tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) &&
(!r->param.mount.need_dev ||
tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name));
}
/**
* tomoyo_mount_acl - Check permission for mount() operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @dev_name: Name of device file.
* @dir: Pointer to "struct path".
* @type: Name of filesystem type.
* @flags: Mount options.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
struct path *dir, char *type, unsigned long flags)
{
struct path path;
struct file_system_type *fstype = NULL;
const char *requested_type = NULL;
const char *requested_dir_name = NULL;
const char *requested_dev_name = NULL;
struct tomoyo_path_info rtype;
struct tomoyo_path_info rdev;
struct tomoyo_path_info rdir;
int need_dev = 0;
int error = -ENOMEM;
/* Get fstype. */
requested_type = tomoyo_encode(type);
if (!requested_type)
goto out;
rtype.name = requested_type;
tomoyo_fill_path_info(&rtype);
/* Get mount point. */
requested_dir_name = tomoyo_realpath_from_path(dir);
if (!requested_dir_name) {
error = -ENOMEM;
goto out;
}
rdir.name = requested_dir_name;
tomoyo_fill_path_info(&rdir);
/* Compare fs name. */
if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) {
/* dev_name is ignored. */
} else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) ||
!strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) {
need_dev = -1; /* dev_name is a directory */
} else {
fstype = get_fs_type(type);
if (!fstype) {
error = -ENODEV;
goto out;
}
if (fstype->fs_flags & FS_REQUIRES_DEV)
/* dev_name is a block device file. */
need_dev = 1;
}
if (need_dev) {
/* Get mount point or device file. */
if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
error = -ENOENT;
goto out;
}
requested_dev_name = tomoyo_realpath_from_path(&path);
path_put(&path);
if (!requested_dev_name) {
error = -ENOENT;
goto out;
}
} else {
/* Map dev_name to "<NULL>" if no dev_name given. */
if (!dev_name)
dev_name = "<NULL>";
requested_dev_name = tomoyo_encode(dev_name);
if (!requested_dev_name) {
error = -ENOMEM;
goto out;
}
}
rdev.name = requested_dev_name;
tomoyo_fill_path_info(&rdev);
r->param_type = TOMOYO_TYPE_MOUNT_ACL;
r->param.mount.need_dev = need_dev;
r->param.mount.dev = &rdev;
r->param.mount.dir = &rdir;
r->param.mount.type = &rtype;
r->param.mount.flags = flags;
do {
tomoyo_check_acl(r, tomoyo_check_mount_acl);
error = tomoyo_audit_mount_log(r);
} while (error == TOMOYO_RETRY_REQUEST);
out:
kfree(requested_dev_name);
kfree(requested_dir_name);
if (fstype)
put_filesystem(fstype);
kfree(requested_type);
return error;
}
/**
* tomoyo_mount_permission - Check permission for mount() operation.
*
* @dev_name: Name of device file.
* @path: Pointer to "struct path".
* @type: Name of filesystem type. May be NULL.
* @flags: Mount options.
* @data_page: Optional data. May be NULL.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_mount_permission(char *dev_name, struct path *path, char *type,
unsigned long flags, void *data_page)
{
struct tomoyo_request_info r;
int error;
int idx;
if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
== TOMOYO_CONFIG_DISABLED)
return 0;
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
if (flags & MS_REMOUNT) {
type = TOMOYO_MOUNT_REMOUNT_KEYWORD;
flags &= ~MS_REMOUNT;
}
if (flags & MS_MOVE) {
type = TOMOYO_MOUNT_MOVE_KEYWORD;
flags &= ~MS_MOVE;
}
if (flags & MS_BIND) {
type = TOMOYO_MOUNT_BIND_KEYWORD;
flags &= ~MS_BIND;
}
if (flags & MS_UNBINDABLE) {
type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD;
flags &= ~MS_UNBINDABLE;
}
if (flags & MS_PRIVATE) {
type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD;
flags &= ~MS_PRIVATE;
}
if (flags & MS_SLAVE) {
type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD;
flags &= ~MS_SLAVE;
}
if (flags & MS_SHARED) {
type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD;
flags &= ~MS_SHARED;
}
if (!type)
type = "<NULL>";
idx = tomoyo_read_lock();
error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
tomoyo_read_unlock(idx);
return error;
}
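/*
 * Two mount ACL entries are treated as identical only when the common
 * ACL head matches and all four components agree: device name, mount
 * point, filesystem type and the mount flags.  tomoyo_update_domain()
 * uses this comparator (see tomoyo_write_mount() below) to avoid
 * storing duplicate entries.
 */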
static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_acl_head(&p1->head, &p2->head) &&
tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
tomoyo_same_number_union(&p1->flags, &p2->flags);
}
/**
* tomoyo_write_mount - Write "struct tomoyo_mount_acl" list.
*
* @data: String to parse.
* @domain: Pointer to "struct tomoyo_domain_info".
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain,
const bool is_delete)
{
struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
int error = is_delete ? -ENOENT : -ENOMEM;
char *w[4];
if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[3][0])
return -EINVAL;
if (!tomoyo_parse_name_union(w[0], &e.dev_name) ||
!tomoyo_parse_name_union(w[1], &e.dir_name) ||
!tomoyo_parse_name_union(w[2], &e.fs_type) ||
!tomoyo_parse_number_union(w[3], &e.flags))
goto out;
error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain,
tomoyo_same_mount_acl, NULL);
out:
tomoyo_put_name_union(&e.dev_name);
tomoyo_put_name_union(&e.dir_name);
tomoyo_put_name_union(&e.fs_type);
tomoyo_put_number_union(&e.flags);
return error;
}
|
55,760,068,778,071 | /*
* linux/fs/lockd/clntproc.c
*
* RPC procedures for the client side NLM implementation
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
#define NLMCLNT_MAX_RETRIES 3
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int nlm_stat_to_errno(__be32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;
/*
* Cookie counter for NLM requests
*/
static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
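/*
 * Each outgoing request gets a fresh 4-byte cookie taken from the
 * module-wide counter above; the server echoes the cookie back in its
 * reply, so the client can tell its own requests apart.
 */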
void nlmclnt_next_cookie(struct nlm_cookie *c)
{
u32 cookie = atomic_inc_return(&nlm_cookie);
memcpy(c->data, &cookie, 4);
c->len=4;
}
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
atomic_inc(&lockowner->count);
return lockowner;
}
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
list_del(&lockowner->list);
spin_unlock(&lockowner->host->h_lock);
nlmclnt_release_host(lockowner->host);
kfree(lockowner);
}
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
struct nlm_lockowner *lockowner;
list_for_each_entry(lockowner, &host->h_lockowners, list) {
if (lockowner->pid == pid)
return -EBUSY;
}
return 0;
}
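/*
 * Pick the next free per-host pid: keep bumping h_pidcount until
 * nlm_pidbusy() reports that no existing lockowner on this host is
 * already using the value.
 */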
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
uint32_t res;
do {
res = host->h_pidcount++;
} while (nlm_pidbusy(host, res) < 0);
return res;
}
static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
struct nlm_lockowner *lockowner;
list_for_each_entry(lockowner, &host->h_lockowners, list) {
if (lockowner->owner != owner)
continue;
return nlm_get_lockowner(lockowner);
}
return NULL;
}
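/*
 * Look up (or lazily create) the lockowner for this fl_owner.  The
 * allocation is done with h_lock dropped, so the list is searched a
 * second time after reacquiring the lock; if another thread raced in
 * and inserted an owner first, the spare allocation is simply freed.
 */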
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
struct nlm_lockowner *res, *new = NULL;
spin_lock(&host->h_lock);
res = __nlm_find_lockowner(host, owner);
if (res == NULL) {
spin_unlock(&host->h_lock);
new = kmalloc(sizeof(*new), GFP_KERNEL);
spin_lock(&host->h_lock);
res = __nlm_find_lockowner(host, owner);
if (res == NULL && new != NULL) {
res = new;
atomic_set(&new->count, 1);
new->owner = owner;
new->pid = __nlm_alloc_pid(host);
new->host = nlm_get_host(host);
list_add(&new->list, &host->h_lockowners);
new = NULL;
}
}
spin_unlock(&host->h_lock);
kfree(new);
return res;
}
/*
* Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
*/
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
(unsigned int)fl->fl_u.nfs_fl.owner->pid,
utsname()->nodename);
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
lock->fl.fl_type = fl->fl_type;
}
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}
/**
* nlmclnt_proc - Perform a single client-side lock request
* @host: address of a valid nlm_host context representing the NLM server
* @cmd: fcntl-style file lock operation to perform
* @fl: address of arguments for the lock operation
*
*/
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
struct nlm_rqst *call;
int status;
nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return -ENOMEM;
nlmclnt_locks_init_private(fl, host);
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
status = nlmclnt_lock(call, fl);
} else
status = nlmclnt_unlock(call, fl);
} else if (IS_GETLK(cmd))
status = nlmclnt_test(call, fl);
else
status = -EINVAL;
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
dprintk("lockd: clnt proc returns %d\n", status);
return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
/*
* Allocate an NLM RPC call struct
*
* Note: the caller must hold a reference to host. In case of failure,
* this reference will be released.
*/
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
struct nlm_rqst *call;
for(;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (call != NULL) {
atomic_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = host;
return call;
}
if (signalled())
break;
printk("nlm_alloc_call: failed, waiting for memory\n");
schedule_timeout_interruptible(5*HZ);
}
nlmclnt_release_host(host);
return NULL;
}
void nlmclnt_release_call(struct nlm_rqst *call)
{
if (!atomic_dec_and_test(&call->a_count))
return;
nlmclnt_release_host(call->a_host);
nlmclnt_release_lockargs(call);
kfree(call);
}
static void nlmclnt_rpc_release(void *data)
{
nlmclnt_release_call(data);
}
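/*
 * Sleep for up to NLMCLNT_GRACE_WAIT while the server is in its grace
 * period.  Returns 0 once the wait expires (or the queue is woken) and
 * -EINTR if a signal is pending either before or after the sleep.
 */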
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
DEFINE_WAIT(wait);
int status = -EINTR;
prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
if (!signalled ()) {
schedule_timeout(NLMCLNT_GRACE_WAIT);
try_to_freeze();
if (!signalled ())
status = 0;
}
finish_wait(queue, &wait);
return status;
}
/*
* Generic NLM call
*/
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
struct nlm_args *argp = &req->a_args;
struct nlm_res *resp = &req->a_res;
struct rpc_message msg = {
.rpc_argp = argp,
.rpc_resp = resp,
.rpc_cred = cred,
};
int status;
dprintk("lockd: call procedure %d on %s\n",
(int)proc, host->h_name);
do {
if (host->h_reclaiming && !argp->reclaim)
goto in_grace_period;
/* If we have no RPC client yet, create one. */
if ((clnt = nlm_bind_host(host)) == NULL)
return -ENOLCK;
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* Perform the RPC call. If an error occurs, try again */
if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
dprintk("lockd: rpc_call returned error %d\n", -status);
switch (status) {
case -EPROTONOSUPPORT:
status = -EINVAL;
break;
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ENOTCONN:
nlm_rebind_host(host);
status = -EAGAIN;
break;
case -ERESTARTSYS:
return signalled () ? -EINTR : status;
default:
break;
}
break;
} else
if (resp->status == nlm_lck_denied_grace_period) {
dprintk("lockd: server in grace period\n");
if (argp->reclaim) {
printk(KERN_WARNING
"lockd: spurious grace period reject?!\n");
return -ENOLCK;
}
} else {
if (!argp->reclaim) {
/* We appear to be out of the grace period */
wake_up_all(&host->h_gracewait);
}
dprintk("lockd: server returns status %d\n", resp->status);
return 0; /* Okay, call complete */
}
in_grace_period:
/*
* The server has rebooted and appears to be in the grace
* period during which locks are only allowed to be
* reclaimed.
* We can only back off and try again later.
*/
status = nlm_wait_on_grace(&host->h_gracewait);
} while (status == 0);
return status;
}
/*
* Generic NLM call, async version.
*/
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
struct rpc_task_setup task_setup_data = {
.rpc_message = msg,
.callback_ops = tk_ops,
.callback_data = req,
.flags = RPC_TASK_ASYNC,
};
dprintk("lockd: call procedure %d on %s (async)\n",
(int)proc, host->h_name);
/* If we have no RPC client yet, create one. */
clnt = nlm_bind_host(host);
if (clnt == NULL)
goto out_err;
msg->rpc_proc = &clnt->cl_procinfo[proc];
task_setup_data.rpc_client = clnt;
/* bootstrap and kick off the async RPC call */
return rpc_run_task(&task_setup_data);
out_err:
tk_ops->rpc_release(req);
return ERR_PTR(-ENOLCK);
}
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
task = __nlm_async_call(req, proc, msg, tk_ops);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
/*
* NLM asynchronous call.
*/
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
};
return nlm_do_async_call(req, proc, &msg, tk_ops);
}
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_res,
};
return nlm_do_async_call(req, proc, &msg, tk_ops);
}
/*
* NLM client asynchronous call.
*
* Note that although the calls are asynchronous, and are therefore
* guaranteed to complete, we still always attempt to wait for
* completion in order to be able to correctly track the lock
* state.
*/
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
.rpc_cred = cred,
};
struct rpc_task *task;
int err;
task = __nlm_async_call(req, proc, &msg, tk_ops);
if (IS_ERR(task))
return PTR_ERR(task);
err = rpc_wait_for_completion_task(task);
rpc_put_task(task);
return err;
}
/*
* TEST for the presence of a conflicting lock
*/
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
int status;
status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
if (status < 0)
goto out;
switch (req->a_res.status) {
case nlm_granted:
fl->fl_type = F_UNLCK;
break;
case nlm_lck_denied:
/*
* Report the conflicting lock back to the application.
*/
fl->fl_start = req->a_res.lock.fl.fl_start;
fl->fl_end = req->a_res.lock.fl.fl_end;
fl->fl_type = req->a_res.lock.fl.fl_type;
fl->fl_pid = 0;
break;
default:
status = nlm_stat_to_errno(req->a_res.status);
}
out:
nlmclnt_release_call(req);
return status;
}
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
list_del(&fl->fl_u.nfs_fl.list);
spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}
static const struct file_lock_operations nlmclnt_lock_ops = {
.fl_copy_lock = nlmclnt_locks_copy_lock,
.fl_release_private = nlmclnt_locks_release_private,
};
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
BUG_ON(fl->fl_ops != NULL);
fl->fl_u.nfs_fl.state = 0;
fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
fl->fl_ops = &nlmclnt_lock_ops;
}
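/*
 * Apply the file_lock through the local VFS lock machinery, dispatching
 * on whether it is a POSIX (fcntl) or a BSD (flock) style lock.
 */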
static int do_vfs_lock(struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_file_wait(fl->fl_file, fl);
break;
case FL_FLOCK:
res = flock_lock_file_wait(fl->fl_file, fl);
break;
default:
BUG();
}
return res;
}
/*
* LOCK: Try to create a lock
*
* Programmer Harassment Alert
*
* When given a blocking lock request in a sync RPC call, the HPUX lockd
* will faithfully return LCK_BLOCKED but never cares to notify us when
* the lock could be granted. This way, our local process could hang
* around forever waiting for the callback.
*
* Solution A: Implement busy-waiting
* Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
*
* For now I am implementing solution A, because I hate the idea of
* re-implementing lockd for a third time in two months. The async
* calls shouldn't be too hard to do, however.
*
* This is one of the lovely things about standards in the NFS area:
* they're so soft and squishy you can't really blame HP for doing this.
*/
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
struct nlm_wait *block = NULL;
unsigned char fl_flags = fl->fl_flags;
unsigned char fl_type;
int status = -ENOLCK;
if (nsm_monitor(host) < 0)
goto out;
req->a_args.state = nsm_local_state;
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
fl->fl_flags = fl_flags;
if (status < 0)
goto out;
block = nlmclnt_prepare_block(host, fl);
again:
/*
* Initialise resp->status to a valid non-zero value,
* since 0 == nlm_lck_granted
*/
resp->status = nlm_lck_blocked;
for(;;) {
/* Reboot protection */
fl->fl_u.nfs_fl.state = host->h_state;
status = nlmclnt_call(cred, req, NLMPROC_LOCK);
if (status < 0)
break;
/* Did a reclaimer thread notify us of a server reboot? */
if (resp->status == nlm_lck_denied_grace_period)
continue;
if (resp->status != nlm_lck_blocked)
break;
/* Wait on an NLM blocking lock */
status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
if (status < 0)
break;
if (resp->status != nlm_lck_blocked)
break;
}
/* if we were interrupted while blocking, then cancel the lock request
* and exit
*/
if (resp->status == nlm_lck_blocked) {
if (!req->a_args.block)
goto out_unlock;
if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
goto out_unblock;
}
if (resp->status == nlm_granted) {
down_read(&host->h_rwsem);
/* Check whether or not the server has rebooted */
if (fl->fl_u.nfs_fl.state != host->h_state) {
up_read(&host->h_rwsem);
goto again;
}
/* Ensure the resulting lock will get added to granted list */
fl->fl_flags |= FL_SLEEP;
if (do_vfs_lock(fl) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
up_read(&host->h_rwsem);
fl->fl_flags = fl_flags;
status = 0;
}
if (status < 0)
goto out_unlock;
/*
* EAGAIN doesn't make sense for sleeping locks, and in some
* cases NLM_LCK_DENIED is returned for a permanent error. So
* turn it into an ENOLCK.
*/
if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
status = -ENOLCK;
else
status = nlm_stat_to_errno(resp->status);
out_unblock:
nlmclnt_finish_block(block);
out:
nlmclnt_release_call(req);
return status;
out_unlock:
/* Fatal error: ensure that we remove the lock altogether */
dprintk("lockd: lock attempt ended in fatal error.\n"
" Attempting to unlock.\n");
nlmclnt_finish_block(block);
fl_type = fl->fl_type;
fl->fl_type = F_UNLCK;
down_read(&host->h_rwsem);
do_vfs_lock(fl);
up_read(&host->h_rwsem);
fl->fl_type = fl_type;
fl->fl_flags = fl_flags;
nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
return status;
}
/*
* RECLAIM: Try to reclaim a lock
*/
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
struct nlm_rqst reqst, *req;
int status;
req = &reqst;
memset(req, 0, sizeof(*req));
locks_init_lock(&req->a_args.lock.fl);
locks_init_lock(&req->a_res.lock.fl);
req->a_host = host;
req->a_flags = 0;
/* Set up the argument struct */
nlmclnt_setlockargs(req, fl);
req->a_args.reclaim = 1;
status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
if (status >= 0 && req->a_res.status == nlm_granted)
return 0;
printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
"(errno %d, status %d)\n", fl->fl_pid,
status, ntohl(req->a_res.status));
/*
* FIXME: This is a serious failure. We can
*
* a. Ignore the problem
* b. Send the owning process some signal (Linux doesn't have
* SIGLOST, though...)
* c. Retry the operation
*
* Until someone comes up with a simple implementation
* for b or c, I'll choose option a.
*/
return -ENOLCK;
}
/*
* UNLOCK: remove an existing lock
*/
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
int status;
unsigned char fl_flags = fl->fl_flags;
/*
* Note: the server is supposed to either grant us the unlock
* request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
* case, we want to unlock.
*/
fl->fl_flags |= FL_EXISTS;
down_read(&host->h_rwsem);
status = do_vfs_lock(fl);
up_read(&host->h_rwsem);
fl->fl_flags = fl_flags;
if (status == -ENOENT) {
status = 0;
goto out;
}
atomic_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
goto out;
if (resp->status == nlm_granted)
goto out;
if (resp->status != nlm_lck_denied_nolocks)
printk("lockd: unexpected unlock status: %d\n", resp->status);
/* What to do now? I'm out of my depth... */
status = -ENOLCK;
out:
nlmclnt_release_call(req);
return status;
}
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
goto retry_rebind;
}
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
rpc_delay(task, NLMCLNT_GRACE_WAIT);
goto retry_unlock;
}
if (status != NLM_LCK_GRANTED)
printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
return;
retry_rebind:
nlm_rebind_host(req->a_host);
retry_unlock:
rpc_restart_call(task);
}
static const struct rpc_call_ops nlmclnt_unlock_ops = {
.rpc_call_done = nlmclnt_unlock_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
* Cancel a blocked lock request.
* We always use an async RPC call for this in order not to hang a
* process that has been Ctrl-C'ed.
*/
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
struct nlm_rqst *req;
int status;
dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
" Attempting to cancel lock.\n");
req = nlm_alloc_call(nlm_get_host(host));
if (!req)
return -ENOMEM;
req->a_flags = RPC_TASK_ASYNC;
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
atomic_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
status = -ENOLCK;
nlmclnt_release_call(req);
return status;
}
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: CANCEL call error %d, retrying.\n",
task->tk_status);
goto retry_cancel;
}
dprintk("lockd: cancel status %u (task %u)\n",
status, task->tk_pid);
switch (status) {
case NLM_LCK_GRANTED:
case NLM_LCK_DENIED_GRACE_PERIOD:
case NLM_LCK_DENIED:
/* Everything's good */
break;
case NLM_LCK_DENIED_NOLOCKS:
dprintk("lockd: CANCEL failed (server has no locks)\n");
goto retry_cancel;
default:
printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
status);
}
die:
return;
retry_cancel:
/* Don't ever retry more than 3 times */
if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
goto die;
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
rpc_delay(task, 30 * HZ);
}
static const struct rpc_call_ops nlmclnt_cancel_ops = {
.rpc_call_done = nlmclnt_cancel_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
* Convert an NLM status code to a generic kernel errno
*/
static int
nlm_stat_to_errno(__be32 status)
{
switch(ntohl(status)) {
case NLM_LCK_GRANTED:
return 0;
case NLM_LCK_DENIED:
return -EAGAIN;
case NLM_LCK_DENIED_NOLOCKS:
case NLM_LCK_DENIED_GRACE_PERIOD:
return -ENOLCK;
case NLM_LCK_BLOCKED:
printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
case NLM_DEADLCK:
return -EDEADLK;
case NLM_ROFS:
return -EROFS;
case NLM_STALE_FH:
return -ESTALE;
case NLM_FBIG:
return -EOVERFLOW;
case NLM_FAILED:
return -ENOLCK;
#endif
}
printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
return -ENOLCK;
}
| /*
* linux/fs/lockd/clntproc.c
*
* RPC procedures for the client side NLM implementation
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
#define NLMCLNT_MAX_RETRIES 3
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int nlm_stat_to_errno(__be32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;
/*
* Cookie counter for NLM requests
*/
static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
void nlmclnt_next_cookie(struct nlm_cookie *c)
{
u32 cookie = atomic_inc_return(&nlm_cookie);
memcpy(c->data, &cookie, 4);
c->len=4;
}
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
atomic_inc(&lockowner->count);
return lockowner;
}
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
list_del(&lockowner->list);
spin_unlock(&lockowner->host->h_lock);
nlmclnt_release_host(lockowner->host);
kfree(lockowner);
}
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
struct nlm_lockowner *lockowner;
list_for_each_entry(lockowner, &host->h_lockowners, list) {
if (lockowner->pid == pid)
return -EBUSY;
}
return 0;
}
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
uint32_t res;
do {
res = host->h_pidcount++;
} while (nlm_pidbusy(host, res) < 0);
return res;
}
static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
struct nlm_lockowner *lockowner;
list_for_each_entry(lockowner, &host->h_lockowners, list) {
if (lockowner->owner != owner)
continue;
return nlm_get_lockowner(lockowner);
}
return NULL;
}
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
struct nlm_lockowner *res, *new = NULL;
spin_lock(&host->h_lock);
res = __nlm_find_lockowner(host, owner);
if (res == NULL) {
spin_unlock(&host->h_lock);
new = kmalloc(sizeof(*new), GFP_KERNEL);
spin_lock(&host->h_lock);
res = __nlm_find_lockowner(host, owner);
if (res == NULL && new != NULL) {
res = new;
atomic_set(&new->count, 1);
new->owner = owner;
new->pid = __nlm_alloc_pid(host);
new->host = nlm_get_host(host);
list_add(&new->list, &host->h_lockowners);
new = NULL;
}
}
spin_unlock(&host->h_lock);
kfree(new);
return res;
}
/*
* Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
*/
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
(unsigned int)fl->fl_u.nfs_fl.owner->pid,
utsname()->nodename);
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
lock->fl.fl_type = fl->fl_type;
}
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}
/**
* nlmclnt_proc - Perform a single client-side lock request
* @host: address of a valid nlm_host context representing the NLM server
* @cmd: fcntl-style file lock operation to perform
* @fl: address of arguments for the lock operation
*
*/
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
struct nlm_rqst *call;
int status;
nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return -ENOMEM;
nlmclnt_locks_init_private(fl, host);
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
status = nlmclnt_lock(call, fl);
} else
status = nlmclnt_unlock(call, fl);
} else if (IS_GETLK(cmd))
status = nlmclnt_test(call, fl);
else
status = -EINVAL;
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
dprintk("lockd: clnt proc returns %d\n", status);
return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);
/*
* Allocate an NLM RPC call struct
*
* Note: the caller must hold a reference to host. In case of failure,
* this reference will be released.
*/
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
struct nlm_rqst *call;
for(;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (call != NULL) {
atomic_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = host;
return call;
}
if (signalled())
break;
printk("nlm_alloc_call: failed, waiting for memory\n");
schedule_timeout_interruptible(5*HZ);
}
nlmclnt_release_host(host);
return NULL;
}
void nlmclnt_release_call(struct nlm_rqst *call)
{
if (!atomic_dec_and_test(&call->a_count))
return;
nlmclnt_release_host(call->a_host);
nlmclnt_release_lockargs(call);
kfree(call);
}
static void nlmclnt_rpc_release(void *data)
{
nlmclnt_release_call(data);
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
DEFINE_WAIT(wait);
int status = -EINTR;
prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
if (!signalled ()) {
schedule_timeout(NLMCLNT_GRACE_WAIT);
try_to_freeze();
if (!signalled ())
status = 0;
}
finish_wait(queue, &wait);
return status;
}
/*
* Generic NLM call
*/
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
struct nlm_args *argp = &req->a_args;
struct nlm_res *resp = &req->a_res;
struct rpc_message msg = {
.rpc_argp = argp,
.rpc_resp = resp,
.rpc_cred = cred,
};
int status;
dprintk("lockd: call procedure %d on %s\n",
(int)proc, host->h_name);
do {
if (host->h_reclaiming && !argp->reclaim)
goto in_grace_period;
/* If we have no RPC client yet, create one. */
if ((clnt = nlm_bind_host(host)) == NULL)
return -ENOLCK;
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* Perform the RPC call. If an error occurs, try again */
if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
dprintk("lockd: rpc_call returned error %d\n", -status);
switch (status) {
case -EPROTONOSUPPORT:
status = -EINVAL;
break;
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ENOTCONN:
nlm_rebind_host(host);
status = -EAGAIN;
break;
case -ERESTARTSYS:
return signalled () ? -EINTR : status;
default:
break;
}
break;
} else
if (resp->status == nlm_lck_denied_grace_period) {
dprintk("lockd: server in grace period\n");
if (argp->reclaim) {
printk(KERN_WARNING
"lockd: spurious grace period reject?!\n");
return -ENOLCK;
}
} else {
if (!argp->reclaim) {
/* We appear to be out of the grace period */
wake_up_all(&host->h_gracewait);
}
dprintk("lockd: server returns status %d\n", resp->status);
return 0; /* Okay, call complete */
}
in_grace_period:
/*
* The server has rebooted and appears to be in the grace
* period during which locks are only allowed to be
* reclaimed.
* We can only back off and try again later.
*/
status = nlm_wait_on_grace(&host->h_gracewait);
} while (status == 0);
return status;
}
/*
* Generic NLM call, async version.
*/
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
struct rpc_task_setup task_setup_data = {
.rpc_message = msg,
.callback_ops = tk_ops,
.callback_data = req,
.flags = RPC_TASK_ASYNC,
};
dprintk("lockd: call procedure %d on %s (async)\n",
(int)proc, host->h_name);
/* If we have no RPC client yet, create one. */
clnt = nlm_bind_host(host);
if (clnt == NULL)
goto out_err;
msg->rpc_proc = &clnt->cl_procinfo[proc];
task_setup_data.rpc_client = clnt;
/* bootstrap and kick off the async RPC call */
return rpc_run_task(&task_setup_data);
out_err:
tk_ops->rpc_release(req);
return ERR_PTR(-ENOLCK);
}
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
task = __nlm_async_call(req, proc, msg, tk_ops);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
/*
* NLM asynchronous call.
*/
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
};
return nlm_do_async_call(req, proc, &msg, tk_ops);
}
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_res,
};
return nlm_do_async_call(req, proc, &msg, tk_ops);
}
/*
* NLM client asynchronous call.
*
* Note that although the calls are asynchronous, and are therefore
* guaranteed to complete, we still always attempt to wait for
* completion in order to be able to correctly track the lock
* state.
*/
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct rpc_message msg = {
.rpc_argp = &req->a_args,
.rpc_resp = &req->a_res,
.rpc_cred = cred,
};
struct rpc_task *task;
int err;
task = __nlm_async_call(req, proc, &msg, tk_ops);
if (IS_ERR(task))
return PTR_ERR(task);
err = rpc_wait_for_completion_task(task);
rpc_put_task(task);
return err;
}
/*
* TEST for the presence of a conflicting lock
*/
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
int status;
status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
if (status < 0)
goto out;
switch (req->a_res.status) {
case nlm_granted:
fl->fl_type = F_UNLCK;
break;
case nlm_lck_denied:
/*
* Report the conflicting lock back to the application.
*/
fl->fl_start = req->a_res.lock.fl.fl_start;
fl->fl_end = req->a_res.lock.fl.fl_end;
fl->fl_type = req->a_res.lock.fl.fl_type;
fl->fl_pid = 0;
break;
default:
status = nlm_stat_to_errno(req->a_res.status);
}
out:
nlmclnt_release_call(req);
return status;
}
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
list_del(&fl->fl_u.nfs_fl.list);
spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}
static const struct file_lock_operations nlmclnt_lock_ops = {
.fl_copy_lock = nlmclnt_locks_copy_lock,
.fl_release_private = nlmclnt_locks_release_private,
};
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
BUG_ON(fl->fl_ops != NULL);
fl->fl_u.nfs_fl.state = 0;
fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
fl->fl_ops = &nlmclnt_lock_ops;
}
static int do_vfs_lock(struct file_lock *fl)
{
int res = 0;
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
case FL_POSIX:
res = posix_lock_file_wait(fl->fl_file, fl);
break;
case FL_FLOCK:
res = flock_lock_file_wait(fl->fl_file, fl);
break;
default:
BUG();
}
return res;
}
/*
* LOCK: Try to create a lock
*
* Programmer Harassment Alert
*
* When given a blocking lock request in a sync RPC call, the HPUX lockd
* will faithfully return LCK_BLOCKED but never cares to notify us when
* the lock could be granted. This way, our local process could hang
* around forever waiting for the callback.
*
* Solution A: Implement busy-waiting
* Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
*
* For now I am implementing solution A, because I hate the idea of
* re-implementing lockd for a third time in two months. The async
* calls shouldn't be too hard to do, however.
*
* This is one of the lovely things about standards in the NFS area:
* they're so soft and squishy you can't really blame HP for doing this.
*/
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
struct nlm_wait *block = NULL;
unsigned char fl_flags = fl->fl_flags;
unsigned char fl_type;
int status = -ENOLCK;
if (nsm_monitor(host) < 0)
goto out;
req->a_args.state = nsm_local_state;
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
fl->fl_flags = fl_flags;
if (status < 0)
goto out;
block = nlmclnt_prepare_block(host, fl);
again:
/*
* Initialise resp->status to a valid non-zero value,
* since 0 == nlm_lck_granted
*/
resp->status = nlm_lck_blocked;
for(;;) {
/* Reboot protection */
fl->fl_u.nfs_fl.state = host->h_state;
status = nlmclnt_call(cred, req, NLMPROC_LOCK);
if (status < 0)
break;
/* Did a reclaimer thread notify us of a server reboot? */
if (resp->status == nlm_lck_denied_grace_period)
continue;
if (resp->status != nlm_lck_blocked)
break;
/* Wait on an NLM blocking lock */
status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
if (status < 0)
break;
if (resp->status != nlm_lck_blocked)
break;
}
/* if we were interrupted while blocking, then cancel the lock request
* and exit
*/
if (resp->status == nlm_lck_blocked) {
if (!req->a_args.block)
goto out_unlock;
if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
goto out_unblock;
}
if (resp->status == nlm_granted) {
down_read(&host->h_rwsem);
/* Check whether or not the server has rebooted */
if (fl->fl_u.nfs_fl.state != host->h_state) {
up_read(&host->h_rwsem);
goto again;
}
/* Ensure the resulting lock will get added to granted list */
fl->fl_flags |= FL_SLEEP;
if (do_vfs_lock(fl) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
up_read(&host->h_rwsem);
fl->fl_flags = fl_flags;
status = 0;
}
if (status < 0)
goto out_unlock;
/*
* EAGAIN doesn't make sense for sleeping locks, and in some
* cases NLM_LCK_DENIED is returned for a permanent error. So
* turn it into an ENOLCK.
*/
if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
status = -ENOLCK;
else
status = nlm_stat_to_errno(resp->status);
out_unblock:
nlmclnt_finish_block(block);
out:
nlmclnt_release_call(req);
return status;
out_unlock:
/* Fatal error: ensure that we remove the lock altogether */
dprintk("lockd: lock attempt ended in fatal error.\n"
" Attempting to unlock.\n");
nlmclnt_finish_block(block);
fl_type = fl->fl_type;
fl->fl_type = F_UNLCK;
down_read(&host->h_rwsem);
do_vfs_lock(fl);
up_read(&host->h_rwsem);
fl->fl_type = fl_type;
fl->fl_flags = fl_flags;
nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
return status;
}
/*
* RECLAIM: Try to reclaim a lock
*/
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
struct nlm_rqst reqst, *req;
int status;
req = &reqst;
memset(req, 0, sizeof(*req));
locks_init_lock(&req->a_args.lock.fl);
locks_init_lock(&req->a_res.lock.fl);
req->a_host = host;
req->a_flags = 0;
/* Set up the argument struct */
nlmclnt_setlockargs(req, fl);
req->a_args.reclaim = 1;
status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
if (status >= 0 && req->a_res.status == nlm_granted)
return 0;
printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
"(errno %d, status %d)\n", fl->fl_pid,
status, ntohl(req->a_res.status));
/*
* FIXME: This is a serious failure. We can
*
* a. Ignore the problem
* b. Send the owning process some signal (Linux doesn't have
* SIGLOST, though...)
* c. Retry the operation
*
* Until someone comes up with a simple implementation
* for b or c, I'll choose option a.
*/
return -ENOLCK;
}
/*
* UNLOCK: remove an existing lock
*/
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
int status;
unsigned char fl_flags = fl->fl_flags;
/*
* Note: the server is supposed to either grant us the unlock
* request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
* case, we want to unlock.
*/
fl->fl_flags |= FL_EXISTS;
down_read(&host->h_rwsem);
status = do_vfs_lock(fl);
up_read(&host->h_rwsem);
fl->fl_flags = fl_flags;
if (status == -ENOENT) {
status = 0;
goto out;
}
atomic_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
goto out;
if (resp->status == nlm_granted)
goto out;
if (resp->status != nlm_lck_denied_nolocks)
printk("lockd: unexpected unlock status: %d\n", resp->status);
/* What to do now? I'm out of my depth... */
status = -ENOLCK;
out:
nlmclnt_release_call(req);
return status;
}
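/*
 * Completion callback for the async UNLOCK call.  Transport failures
 * normally rebind the host and retry, but -EACCES and -EIO are treated
 * as permanent and the request is dropped; a server still in its grace
 * period simply delays and resends the UNLOCK.
 */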
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
switch (task->tk_status) {
case -EACCES:
case -EIO:
goto die;
default:
goto retry_rebind;
}
}
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
rpc_delay(task, NLMCLNT_GRACE_WAIT);
goto retry_unlock;
}
if (status != NLM_LCK_GRANTED)
printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
return;
retry_rebind:
nlm_rebind_host(req->a_host);
retry_unlock:
rpc_restart_call(task);
}
static const struct rpc_call_ops nlmclnt_unlock_ops = {
.rpc_call_done = nlmclnt_unlock_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
* Cancel a blocked lock request.
* We always use an async RPC call for this in order not to hang a
* process that has been Ctrl-C'ed.
*/
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
struct nlm_rqst *req;
int status;
dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
" Attempting to cancel lock.\n");
req = nlm_alloc_call(nlm_get_host(host));
if (!req)
return -ENOMEM;
req->a_flags = RPC_TASK_ASYNC;
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
atomic_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
status = -ENOLCK;
nlmclnt_release_call(req);
return status;
}
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = data;
u32 status = ntohl(req->a_res.status);
if (RPC_ASSASSINATED(task))
goto die;
if (task->tk_status < 0) {
dprintk("lockd: CANCEL call error %d, retrying.\n",
task->tk_status);
goto retry_cancel;
}
dprintk("lockd: cancel status %u (task %u)\n",
status, task->tk_pid);
switch (status) {
case NLM_LCK_GRANTED:
case NLM_LCK_DENIED_GRACE_PERIOD:
case NLM_LCK_DENIED:
/* Everything's good */
break;
case NLM_LCK_DENIED_NOLOCKS:
dprintk("lockd: CANCEL failed (server has no locks)\n");
goto retry_cancel;
default:
printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
status);
}
die:
return;
retry_cancel:
/* Don't ever retry more than 3 times */
if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
goto die;
nlm_rebind_host(req->a_host);
rpc_restart_call(task);
rpc_delay(task, 30 * HZ);
}
static const struct rpc_call_ops nlmclnt_cancel_ops = {
.rpc_call_done = nlmclnt_cancel_callback,
.rpc_release = nlmclnt_rpc_release,
};
/*
* Convert an NLM status code to a generic kernel errno
*/
static int
nlm_stat_to_errno(__be32 status)
{
switch(ntohl(status)) {
case NLM_LCK_GRANTED:
return 0;
case NLM_LCK_DENIED:
return -EAGAIN;
case NLM_LCK_DENIED_NOLOCKS:
case NLM_LCK_DENIED_GRACE_PERIOD:
return -ENOLCK;
case NLM_LCK_BLOCKED:
printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
case NLM_DEADLCK:
return -EDEADLK;
case NLM_ROFS:
return -EROFS;
case NLM_STALE_FH:
return -ESTALE;
case NLM_FBIG:
return -EOVERFLOW;
case NLM_FAILED:
return -ENOLCK;
#endif
}
printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
return -ENOLCK;
}
|
27,080,333,202,603 | /*
* linux/include/linux/sunrpc/sched.h
*
* Scheduling primitives for kernel Sun RPC.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>
/*
* This is the actual RPC procedure call info.
*/
struct rpc_procinfo;
struct rpc_message {
struct rpc_procinfo * rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
struct rpc_cred * rpc_cred; /* Credentials */
};
struct rpc_call_ops;
struct rpc_wait_queue;
struct rpc_wait {
struct list_head list; /* wait queue links */
struct list_head links; /* Links to related tasks */
struct list_head timer_list; /* Timer list */
unsigned long expires;
};
/*
* This is the RPC task struct
*/
struct rpc_task {
atomic_t tk_count; /* Reference count */
struct list_head tk_task; /* global list of tasks */
struct rpc_clnt * tk_client; /* RPC client */
struct rpc_rqst * tk_rqstp; /* RPC request */
/*
* RPC call state
*/
struct rpc_message tk_msg; /* RPC call info */
/*
* callback to be executed after waking up
* action next procedure for async tasks
* tk_ops caller callbacks
*/
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
const struct rpc_call_ops *tk_ops;
void * tk_calldata;
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
* be any workqueue
*/
struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
union {
struct work_struct tk_work; /* Async task work queue */
struct rpc_wait tk_wait; /* RPC wait */
} u;
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
int tk_status; /* result of last operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
#ifdef RPC_DEBUG
unsigned short tk_pid; /* debugging aid */
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
tk_cred_retry : 2;
};
#define tk_xprt tk_client->cl_xprt
/* support walking a list of tasks on a wait queue */
#define task_for_each(task, pos, head) \
list_for_each(pos, head) \
if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)
#define task_for_first(task, head) \
if (!list_empty(head) && \
((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))
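/*
 * task_for_each() walks every rpc_task linked on a wait-queue list via
 * u.tk_wait.list; task_for_first() evaluates to false when the list is
 * empty and otherwise binds the head entry, so it can guard a single
 * statement.  Walking a live queue would normally be done with the
 * queue's lock held.
 */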
typedef void (*rpc_action)(struct rpc_task *);
struct rpc_call_ops {
void (*rpc_call_prepare)(struct rpc_task *, void *);
void (*rpc_call_done)(struct rpc_task *, void *);
void (*rpc_release)(void *);
};
struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
struct workqueue_struct *workqueue;
unsigned short flags;
signed char priority;
};
/*
* RPC task flags
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
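/*
 * The clear helpers below bracket clear_bit() with memory barriers so
 * that stores made while the task was RUNNING/QUEUED are visible before
 * another CPU can observe the bit as cleared.
 */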
#define rpc_clear_running(t) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
} while (0)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
} while (0)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
/*
* Task priorities.
* Note: if you change these, you must also change
* the task initialization definitions below.
*/
#define RPC_PRIORITY_LOW (-1)
#define RPC_PRIORITY_NORMAL (0)
#define RPC_PRIORITY_HIGH (1)
#define RPC_PRIORITY_PRIVILEGED (2)
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
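/* With the values above this works out to 1 + 2 - (-1) = 4 priority
 * levels, which sizes the tasks[] array in struct rpc_wait_queue below. */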
struct rpc_timer {
struct timer_list timer;
struct list_head list;
unsigned long expires;
};
/*
* RPC synchronization objects
*/
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char count; /* # task groups remaining serviced so far */
unsigned char nr; /* # tasks remaining for cookie */
unsigned short qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#ifdef RPC_DEBUG
const char * name;
#endif
};
/*
* This is the # requests to send consecutively
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
* Function prototypes
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_destroy_wait_queue(struct rpc_wait_queue *);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
int rpc_queue_empty(struct rpc_wait_queue *);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
void rpc_free(void *);
int rpciod_up(void);
void rpciod_down(void);
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void rpc_show_tasks(void);
#endif
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
return __rpc_wait_for_completion_task(task, NULL);
}
static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio)
{
task->tk_priority = prio - RPC_PRIORITY_LOW;
}
static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio)
{
return (task->tk_priority + RPC_PRIORITY_LOW == prio);
}
#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
return ((q && q->name) ? q->name : "unknown");
}
#endif
#endif /* _LINUX_SUNRPC_SCHED_H_ */
| /*
* linux/include/linux/sunrpc/sched.h
*
* Scheduling primitives for kernel Sun RPC.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>
/*
* This is the actual RPC procedure call info.
*/
struct rpc_procinfo;
struct rpc_message {
struct rpc_procinfo * rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
struct rpc_cred * rpc_cred; /* Credentials */
};
struct rpc_call_ops;
struct rpc_wait_queue;
struct rpc_wait {
struct list_head list; /* wait queue links */
struct list_head links; /* Links to related tasks */
struct list_head timer_list; /* Timer list */
unsigned long expires;
};
/*
* This is the RPC task struct
*/
struct rpc_task {
atomic_t tk_count; /* Reference count */
struct list_head tk_task; /* global list of tasks */
struct rpc_clnt * tk_client; /* RPC client */
struct rpc_rqst * tk_rqstp; /* RPC request */
/*
* RPC call state
*/
struct rpc_message tk_msg; /* RPC call info */
/*
* callback to be executed after waking up
* action next procedure for async tasks
* tk_ops caller callbacks
*/
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
const struct rpc_call_ops *tk_ops;
void * tk_calldata;
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
* be any workqueue
*/
struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
union {
struct work_struct tk_work; /* Async task work queue */
struct rpc_wait tk_wait; /* RPC wait */
} u;
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
int tk_status; /* result of last operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
#ifdef RPC_DEBUG
unsigned short tk_pid; /* debugging aid */
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
tk_cred_retry : 2,
tk_rebind_retry : 2;
};
#define tk_xprt tk_client->cl_xprt
/* support walking a list of tasks on a wait queue */
#define task_for_each(task, pos, head) \
list_for_each(pos, head) \
if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)
#define task_for_first(task, head) \
if (!list_empty(head) && \
((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))
typedef void (*rpc_action)(struct rpc_task *);
struct rpc_call_ops {
void (*rpc_call_prepare)(struct rpc_task *, void *);
void (*rpc_call_done)(struct rpc_task *, void *);
void (*rpc_release)(void *);
};
struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
struct workqueue_struct *workqueue;
unsigned short flags;
signed char priority;
};
/*
* RPC task flags
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
} while (0)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
} while (0)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
/*
* Task priorities.
* Note: if you change these, you must also change
* the task initialization definitions below.
*/
#define RPC_PRIORITY_LOW (-1)
#define RPC_PRIORITY_NORMAL (0)
#define RPC_PRIORITY_HIGH (1)
#define RPC_PRIORITY_PRIVILEGED (2)
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
struct rpc_timer {
struct timer_list timer;
struct list_head list;
unsigned long expires;
};
/*
* RPC synchronization objects
*/
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char count; /* # task groups remaining serviced so far */
unsigned char nr; /* # tasks remaining for cookie */
unsigned short qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#ifdef RPC_DEBUG
const char * name;
#endif
};
/*
* This is the # requests to send consecutively
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
* Function prototypes
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_destroy_wait_queue(struct rpc_wait_queue *);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
int rpc_queue_empty(struct rpc_wait_queue *);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
void rpc_free(void *);
int rpciod_up(void);
void rpciod_down(void);
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void rpc_show_tasks(void);
#endif
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
return __rpc_wait_for_completion_task(task, NULL);
}
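/*
 * Illustrative sketch, not part of the original header: the common
 * pattern for running a task and waiting on it synchronously. Assumes
 * kernel context and <linux/err.h> for IS_ERR()/PTR_ERR().
 */
static inline int rpc_example_run_and_wait(const struct rpc_task_setup *setup)
{
	struct rpc_task *task;
	int status;

	task = rpc_run_task(setup);	/* returns with an extra reference held */
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);	/* -ERESTARTSYS if interrupted */
	if (status == 0)
		status = task->tk_status;	/* final RPC status */
	rpc_put_task(task);		/* drop our reference */
	return status;
}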
static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio)
{
task->tk_priority = prio - RPC_PRIORITY_LOW;
}
static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio)
{
return (task->tk_priority + RPC_PRIORITY_LOW == prio);
}
#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
return ((q && q->name) ? q->name : "unknown");
}
#endif
#endif /* _LINUX_SUNRPC_SCHED_H_ */
|
19,770,972,115,121 | /*
* linux/net/sunrpc/clnt.c
*
* This file contains the high-level RPC interface.
* It is modeled as a finite state machine to support both synchronous
* and asynchronous requests.
*
* - RPC header generation and argument serialization.
* - Credential refresh.
* - TCP connect handling.
* - Retry of operation when it is suspected the operation failed because
* of uid squashing on the server, or when the credentials were stale
* and need to be refreshed, or when a packet was damaged in transit.
* This may have to be moved to the VFS layer.
*
* Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
* Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
#define dprint_status(t) \
dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
__func__, t->tk_status)
/*
* All RPC clients are linked into this list
*/
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_client_lock);
list_add(&clnt->cl_clients, &all_clients);
spin_unlock(&rpc_client_lock);
}
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_client_lock);
list_del(&clnt->cl_clients);
spin_unlock(&rpc_client_lock);
}
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
static uint32_t clntid;
struct nameidata nd;
struct path path;
char name[15];
struct qstr q = {
.name = name,
};
int error;
clnt->cl_path.mnt = ERR_PTR(-ENOENT);
clnt->cl_path.dentry = ERR_PTR(-ENOENT);
if (dir_name == NULL)
return 0;
path.mnt = rpc_get_mount();
if (IS_ERR(path.mnt))
return PTR_ERR(path.mnt);
error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
if (error)
goto err;
for (;;) {
q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
name[sizeof(name) - 1] = '\0';
q.hash = full_name_hash(q.name, q.len);
path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
if (!IS_ERR(path.dentry))
break;
error = PTR_ERR(path.dentry);
if (error != -EEXIST) {
printk(KERN_INFO "RPC: Couldn't create pipefs entry"
" %s/%s, error %d\n",
dir_name, name, error);
goto err_path_put;
}
}
path_put(&nd.path);
clnt->cl_path = path;
return 0;
err_path_put:
path_put(&nd.path);
err:
rpc_put_mount();
return error;
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
struct rpc_program *program = args->program;
struct rpc_version *version;
struct rpc_clnt *clnt = NULL;
struct rpc_auth *auth;
int err;
size_t len;
/* sanity check the name before trying to print it */
err = -EINVAL;
len = strlen(args->servername);
if (len > RPC_MAXNETNAMELEN)
goto out_no_rpciod;
len++;
dprintk("RPC: creating %s client for %s (xprt %p)\n",
program->name, args->servername, xprt);
err = rpciod_up();
if (err)
goto out_no_rpciod;
err = -EINVAL;
if (!xprt)
goto out_no_xprt;
if (args->version >= program->nrvers)
goto out_err;
version = program->version[args->version];
if (version == NULL)
goto out_err;
err = -ENOMEM;
clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
goto out_err;
clnt->cl_parent = clnt;
clnt->cl_server = clnt->cl_inline_name;
if (len > sizeof(clnt->cl_inline_name)) {
char *buf = kmalloc(len, GFP_KERNEL);
if (buf != NULL)
clnt->cl_server = buf;
else
len = sizeof(clnt->cl_inline_name);
}
strlcpy(clnt->cl_server, args->servername, len);
clnt->cl_xprt = xprt;
clnt->cl_procinfo = version->procs;
clnt->cl_maxproc = version->nrprocs;
clnt->cl_protname = program->name;
clnt->cl_prog = args->prognumber ? : program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
clnt->cl_metrics = rpc_alloc_iostats(clnt);
err = -ENOMEM;
if (clnt->cl_metrics == NULL)
goto out_no_stats;
clnt->cl_program = program;
INIT_LIST_HEAD(&clnt->cl_tasks);
spin_lock_init(&clnt->cl_lock);
if (!xprt_bound(clnt->cl_xprt))
clnt->cl_autobind = 1;
clnt->cl_timeout = xprt->timeout;
if (args->timeout != NULL) {
memcpy(&clnt->cl_timeout_default, args->timeout,
sizeof(clnt->cl_timeout_default));
clnt->cl_timeout = &clnt->cl_timeout_default;
}
clnt->cl_rtt = &clnt->cl_rtt_default;
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
clnt->cl_principal = NULL;
if (args->client_name) {
clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
if (!clnt->cl_principal)
goto out_no_principal;
}
atomic_set(&clnt->cl_count, 1);
err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
if (err < 0)
goto out_no_path;
auth = rpcauth_create(args->authflavor, clnt);
if (IS_ERR(auth)) {
printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
args->authflavor);
err = PTR_ERR(auth);
goto out_no_auth;
}
/* save the nodename */
clnt->cl_nodelen = strlen(init_utsname()->nodename);
if (clnt->cl_nodelen > UNX_MAXNODENAME)
clnt->cl_nodelen = UNX_MAXNODENAME;
memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
rpc_register_client(clnt);
return clnt;
out_no_auth:
if (!IS_ERR(clnt->cl_path.dentry)) {
rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
out_no_path:
kfree(clnt->cl_principal);
out_no_principal:
rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
if (clnt->cl_server != clnt->cl_inline_name)
kfree(clnt->cl_server);
kfree(clnt);
out_err:
xprt_put(xprt);
out_no_xprt:
rpciod_down();
out_no_rpciod:
return ERR_PTR(err);
}
/**
* rpc_create - create an RPC client and transport with one call
* @args: rpc_clnt create argument structure
*
* Creates and initializes an RPC transport and an RPC client.
*
* It can ping the server in order to determine if it is up, and to see if
* it supports this program and version. RPC_CLNT_CREATE_NOPING disables
* this behavior so asynchronous tasks can also use rpc_create.
*/
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
struct rpc_xprt *xprt;
struct rpc_clnt *clnt;
struct xprt_create xprtargs = {
.net = args->net,
.ident = args->protocol,
.srcaddr = args->saddress,
.dstaddr = args->address,
.addrlen = args->addrsize,
.bc_xprt = args->bc_xprt,
};
char servername[48];
/*
* If the caller chooses not to specify a hostname, whip
* up a string representation of the passed-in address.
*/
if (args->servername == NULL) {
struct sockaddr_un *sun =
(struct sockaddr_un *)args->address;
struct sockaddr_in *sin =
(struct sockaddr_in *)args->address;
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)args->address;
servername[0] = '\0';
switch (args->address->sa_family) {
case AF_LOCAL:
snprintf(servername, sizeof(servername), "%s",
sun->sun_path);
break;
case AF_INET:
snprintf(servername, sizeof(servername), "%pI4",
&sin->sin_addr.s_addr);
break;
case AF_INET6:
snprintf(servername, sizeof(servername), "%pI6",
&sin6->sin6_addr);
break;
default:
/* caller wants default server name, but
* address family isn't recognized. */
return ERR_PTR(-EINVAL);
}
args->servername = servername;
}
xprt = xprt_create_transport(&xprtargs);
if (IS_ERR(xprt))
return (struct rpc_clnt *)xprt;
/*
* By default, kernel RPC client connects from a reserved port.
* CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
* but it is always enabled for rpciod, which handles the connect
* operation.
*/
xprt->resvport = 1;
if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
xprt->resvport = 0;
clnt = rpc_new_client(args, xprt);
if (IS_ERR(clnt))
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
int err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
}
}
clnt->cl_softrtry = 1;
if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
clnt->cl_softrtry = 0;
if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
clnt->cl_autobind = 1;
if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
clnt->cl_discrtry = 1;
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1;
return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
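/*
 * Illustrative sketch only, not part of clnt.c: a minimal rpc_create()
 * call over TCP. "example_program" stands in for a real struct
 * rpc_program supplied by the caller; <net/net_namespace.h> provides
 * init_net.
 */
static struct rpc_clnt *example_create_client(struct sockaddr *sap,
					      size_t salen,
					      struct rpc_program *example_program)
{
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= sap,
		.addrsize	= salen,
		.program	= example_program,
		.version	= 0,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,	/* skip the ping at create time */
	};

	return rpc_create(&args);	/* ERR_PTR() on failure */
}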
/*
* This function clones the RPC client structure. It allows us to share the
* same transport while varying parameters such as the authentication
* flavour.
*/
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
struct rpc_clnt *new;
int err = -ENOMEM;
new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
if (!new)
goto out_no_clnt;
new->cl_parent = clnt;
/* Turn off autobind on clones */
new->cl_autobind = 0;
INIT_LIST_HEAD(&new->cl_tasks);
spin_lock_init(&new->cl_lock);
rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
new->cl_metrics = rpc_alloc_iostats(clnt);
if (new->cl_metrics == NULL)
goto out_no_stats;
if (clnt->cl_principal) {
new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
if (new->cl_principal == NULL)
goto out_no_principal;
}
atomic_set(&new->cl_count, 1);
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
xprt_get(clnt->cl_xprt);
atomic_inc(&clnt->cl_count);
rpc_register_client(new);
rpciod_up();
return new;
out_no_path:
kfree(new->cl_principal);
out_no_principal:
rpc_free_iostats(new->cl_metrics);
out_no_stats:
kfree(new);
out_no_clnt:
dprintk("RPC: %s: returned error %d\n", __func__, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
/*
* Kill all tasks for the given client.
* XXX: kill their descendants as well?
*/
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
struct rpc_task *rovr;
if (list_empty(&clnt->cl_tasks))
return;
dprintk("RPC: killing all tasks for client %p\n", clnt);
/*
 * Hold clnt->cl_lock so the client's task list cannot change under us...
 */
spin_lock(&clnt->cl_lock);
list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
if (!RPC_IS_ACTIVATED(rovr))
continue;
if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
if (RPC_IS_QUEUED(rovr))
rpc_wake_up_queued_task(rovr->tk_waitqueue,
rovr);
}
}
spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
/*
* Properly shut down an RPC client, terminating all outstanding
* requests.
*/
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
dprintk("RPC: shutting down %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
while (!list_empty(&clnt->cl_tasks)) {
rpc_killall_tasks(clnt);
wait_event_timeout(destroy_wait,
list_empty(&clnt->cl_tasks), 1*HZ);
}
rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
/*
* Free an RPC client
*/
static void
rpc_free_client(struct rpc_clnt *clnt)
{
dprintk("RPC: destroying %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
if (!IS_ERR(clnt->cl_path.dentry)) {
rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
if (clnt->cl_parent != clnt) {
rpc_release_client(clnt->cl_parent);
goto out_free;
}
if (clnt->cl_server != clnt->cl_inline_name)
kfree(clnt->cl_server);
out_free:
rpc_unregister_client(clnt);
rpc_free_iostats(clnt->cl_metrics);
kfree(clnt->cl_principal);
clnt->cl_metrics = NULL;
xprt_put(clnt->cl_xprt);
rpciod_down();
kfree(clnt);
}
/*
 * Release the RPC client's authentication handle, then free the client
 */
static void
rpc_free_auth(struct rpc_clnt *clnt)
{
if (clnt->cl_auth == NULL) {
rpc_free_client(clnt);
return;
}
/*
* Note: RPCSEC_GSS may need to send NULL RPC calls in order to
* release remaining GSS contexts. This mechanism ensures
* that it can do so safely.
*/
atomic_inc(&clnt->cl_count);
rpcauth_release(clnt->cl_auth);
clnt->cl_auth = NULL;
if (atomic_dec_and_test(&clnt->cl_count))
rpc_free_client(clnt);
}
/*
* Release reference to the RPC client
*/
void
rpc_release_client(struct rpc_clnt *clnt)
{
dprintk("RPC: rpc_release_client(%p)\n", clnt);
if (list_empty(&clnt->cl_tasks))
wake_up(&destroy_wait);
if (atomic_dec_and_test(&clnt->cl_count))
rpc_free_auth(clnt);
}
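/*
 * Informative summary, derived from the functions above: client teardown
 * runs rpc_shutdown_client() -> rpc_release_client() -> rpc_free_auth()
 * -> rpc_free_client(), with cl_count reaching zero only after both the
 * caller's reference and the transient reference taken in rpc_free_auth()
 * (so RPCSEC_GSS can safely send its context-destroy NULL calls) are gone.
 */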
/**
* rpc_bind_new_program - bind a new RPC program to an existing client
* @old: old rpc_client
* @program: rpc program to set
* @vers: rpc program version
*
* Clones the rpc client and sets up a new RPC program. This is mainly
* of use for enabling different RPC programs to share the same transport.
* The Sun NFSv2/v3 ACL protocol can do this.
*/
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
struct rpc_program *program,
u32 vers)
{
struct rpc_clnt *clnt;
struct rpc_version *version;
int err;
BUG_ON(vers >= program->nrvers || !program->version[vers]);
version = program->version[vers];
clnt = rpc_clone_client(old);
if (IS_ERR(clnt))
goto out;
clnt->cl_procinfo = version->procs;
clnt->cl_maxproc = version->nrprocs;
clnt->cl_protname = program->name;
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
}
out:
return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
void rpc_task_release_client(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
if (clnt != NULL) {
/* Remove from client task list */
spin_lock(&clnt->cl_lock);
list_del(&task->tk_task);
spin_unlock(&clnt->cl_lock);
task->tk_client = NULL;
rpc_release_client(clnt);
}
}
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
if (clnt != NULL) {
rpc_task_release_client(task);
task->tk_client = clnt;
atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
/* Add to the client's list of all tasks */
spin_lock(&clnt->cl_lock);
list_add_tail(&task->tk_task, &clnt->cl_tasks);
spin_unlock(&clnt->cl_lock);
}
}
void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
rpc_task_release_client(task);
rpc_task_set_client(task, clnt);
}
EXPORT_SYMBOL_GPL(rpc_task_reset_client);
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
if (msg != NULL) {
task->tk_msg.rpc_proc = msg->rpc_proc;
task->tk_msg.rpc_argp = msg->rpc_argp;
task->tk_msg.rpc_resp = msg->rpc_resp;
if (msg->rpc_cred != NULL)
task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
}
}
/*
* Default callback for async RPC calls
*/
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}
static const struct rpc_call_ops rpc_default_ops = {
.rpc_call_done = rpc_default_callback,
};
/**
* rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
* @task_setup_data: pointer to task initialisation data
*/
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
struct rpc_task *task;
task = rpc_new_task(task_setup_data);
if (IS_ERR(task))
goto out;
rpc_task_set_client(task, task_setup_data->rpc_client);
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
if (task->tk_action == NULL)
rpc_call_start(task);
atomic_inc(&task->tk_count);
rpc_execute(task);
out:
return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
/**
* rpc_call_sync - Perform a synchronous RPC call
* @clnt: pointer to RPC client
* @msg: RPC call parameters
* @flags: RPC call flags
*/
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &rpc_default_ops,
.flags = flags,
};
int status;
BUG_ON(flags & RPC_TASK_ASYNC);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
rpc_put_task(task);
return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
/**
* rpc_call_async - Perform an asynchronous RPC call
* @clnt: pointer to RPC client
* @msg: RPC call parameters
* @flags: RPC call flags
* @tk_ops: RPC call ops
* @data: user call data
*/
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
const struct rpc_call_ops *tk_ops, void *data)
{
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = tk_ops,
.callback_data = data,
.flags = flags|RPC_TASK_ASYNC,
};
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
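/*
 * Illustrative sketch only, not part of clnt.c: a fire-and-forget async
 * call. "example_proc" is a hypothetical procedure; the completion
 * status is inspected in the rpc_call_done callback.
 */
static void example_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status < 0)
		dprintk("RPC: example call failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_call_done,
};

static int example_call_async(struct rpc_clnt *clnt,
			      struct rpc_procinfo *example_proc)
{
	struct rpc_message msg = {
		.rpc_proc = example_proc,
	};

	return rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
			      &example_call_ops, NULL);
}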
#if defined(CONFIG_NFS_V4_1)
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
* @req: RPC request
* @tk_ops: RPC call ops
*/
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
struct xdr_buf *xbufp = &req->rq_snd_buf;
struct rpc_task_setup task_setup_data = {
.callback_ops = tk_ops,
};
dprintk("RPC: rpc_run_bc_task req= %p\n", req);
/*
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
if (IS_ERR(task)) {
xprt_free_bc_request(req);
goto out;
}
task->tk_rqstp = req;
/*
* Set up the xdr_buf length.
* This also indicates that the buffer is XDR encoded already.
*/
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
xbufp->tail[0].iov_len;
task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count);
BUG_ON(atomic_read(&task->tk_count) != 2);
rpc_execute(task);
out:
dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
return task;
}
#endif /* CONFIG_NFS_V4_1 */
void
rpc_call_start(struct rpc_task *task)
{
task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
/**
* rpc_peeraddr - extract remote peer address from clnt's xprt
* @clnt: RPC client structure
* @buf: target buffer
* @bufsize: length of target buffer
*
* Returns the number of bytes that are actually in the stored address.
*/
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
size_t bytes;
struct rpc_xprt *xprt = clnt->cl_xprt;
bytes = sizeof(xprt->addr);
if (bytes > bufsize)
bytes = bufsize;
memcpy(buf, &clnt->cl_xprt->addr, bytes);
return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);
/**
* rpc_peeraddr2str - return remote peer address in printable format
* @clnt: RPC client structure
* @format: address format
*
*/
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
enum rpc_display_format_t format)
{
struct rpc_xprt *xprt = clnt->cl_xprt;
if (xprt->address_strings[format] != NULL)
return xprt->address_strings[format];
else
return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
struct rpc_xprt *xprt = clnt->cl_xprt;
if (xprt->ops->set_buffer_size)
xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);
/*
* Return size of largest payload RPC client can support, in bytes
*
* For stream transports, this is one RPC record fragment (see RFC
* 1831), as we don't support multi-record requests yet. For datagram
* transports, this is the size of an IP packet minus the IP, UDP, and
* RPC header sizes.
*/
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
/**
* rpc_force_rebind - force transport to check that remote port is unchanged
* @clnt: client to rebind
*
*/
void rpc_force_rebind(struct rpc_clnt *clnt)
{
if (clnt->cl_autobind)
xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
* Restart an (async) RPC call from the call_prepare state.
* Usually called from within the exit handler.
*/
int
rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
task->tk_action = rpc_prepare_task;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
int
rpc_restart_call(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
task->tk_action = call_start;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
#ifdef RPC_DEBUG
static const char *rpc_proc_name(const struct rpc_task *task)
{
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
if (proc) {
if (proc->p_name)
return proc->p_name;
else
return "NULL";
} else
return "no proc";
}
#endif
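/*
 * Informative summary of the call FSM below, derived from the numbered
 * comments on each state handler. The happy path is:
 *
 *   call_start -> call_reserve -> call_refresh -> call_allocate ->
 *   call_bind -> call_connect -> call_transmit -> call_status ->
 *   call_decode -> rpc_exit_task
 *
 * with the *result/*status handlers sorting out errors and call_timeout
 * providing the retry edges.
 */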
/*
* 0. Initial state
*
* Other FSM states can be visited zero or more times, but
* this state is visited exactly once for each RPC.
*/
static void
call_start(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
clnt->cl_protname, clnt->cl_vers,
rpc_proc_name(task),
(RPC_IS_ASYNC(task) ? "async" : "sync"));
/* Increment call count */
task->tk_msg.rpc_proc->p_count++;
clnt->cl_stats->rpccnt++;
task->tk_action = call_reserve;
}
/*
* 1. Reserve an RPC call slot
*/
static void
call_reserve(struct rpc_task *task)
{
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_reserveresult;
xprt_reserve(task);
}
/*
* 1b. Grok the result of xprt_reserve()
*/
static void
call_reserveresult(struct rpc_task *task)
{
int status = task->tk_status;
dprint_status(task);
/*
* After a call to xprt_reserve(), we must have either
* a request slot or else an error status.
*/
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
task->tk_action = call_refresh;
return;
}
printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
__func__, status);
rpc_exit(task, -EIO);
return;
}
/*
* Even though there was an error, we may have acquired
* a request slot somehow. Make sure not to leak it.
*/
if (task->tk_rqstp) {
printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
__func__, status);
xprt_release(task);
}
switch (status) {
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
case -EIO: /* probably a shutdown */
break;
default:
printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
__func__, status);
break;
}
rpc_exit(task, status);
}
/*
* 2. Bind and/or refresh the credentials
*/
static void
call_refresh(struct rpc_task *task)
{
dprint_status(task);
task->tk_action = call_refreshresult;
task->tk_status = 0;
task->tk_client->cl_stats->rpcauthrefresh++;
rpcauth_refreshcred(task);
}
/*
* 2a. Process the results of a credential refresh
*/
static void
call_refreshresult(struct rpc_task *task)
{
int status = task->tk_status;
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_refresh;
switch (status) {
case 0:
if (rpcauth_uptodatecred(task))
task->tk_action = call_allocate;
return;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
/* fall through */
case -EAGAIN:
status = -EACCES;
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry refresh creds\n",
task->tk_pid, __func__);
return;
}
dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
task->tk_pid, __func__, status);
rpc_exit(task, status);
}
/*
* 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
* (Note: buffer memory is freed in xprt_release).
*/
static void
call_allocate(struct rpc_task *task)
{
unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_bind;
if (req->rq_buffer)
return;
if (proc->p_proc != 0) {
BUG_ON(proc->p_arglen == 0);
if (proc->p_decode != NULL)
BUG_ON(proc->p_replen == 0);
}
/*
* Calculate the size (in quads) of the RPC call
* and reply headers, and convert both values
* to byte sizes.
*/
req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
req->rq_callsize <<= 2;
req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
req->rq_rcvsize <<= 2;
req->rq_buffer = xprt->ops->buf_alloc(task,
req->rq_callsize + req->rq_rcvsize);
if (req->rq_buffer != NULL)
return;
dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
if (RPC_IS_ASYNC(task) || !signalled()) {
task->tk_action = call_allocate;
rpc_delay(task, HZ>>4);
return;
}
rpc_exit(task, -ERESTARTSYS);
}
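/*
 * Worked example for the sizing above (illustrative numbers only,
 * assuming the usual RPC_CALLHDRSIZE of 6 quads): with au_cslack == 10
 * and p_arglen == 4, rq_callsize = (6 + 2*10 + 4) << 2 = 120 bytes.
 * The reply buffer is carved from the same allocation directly behind
 * it, as rpc_xdr_encode() shows below.
 */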
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
return task->tk_rqstp->rq_snd_buf.len == 0;
}
static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
task->tk_rqstp->rq_snd_buf.len = 0;
task->tk_rqstp->rq_bytes_sent = 0;
}
static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
buf->page_len = 0;
buf->flags = 0;
buf->len = 0;
buf->buflen = len;
}
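/*
 * Layout sketch: call_allocate() obtained one buffer of
 * rq_callsize + rq_rcvsize bytes; the two xdr_bufs initialized below
 * split it as
 *
 *   rq_buffer: [ send buf (rq_callsize) | receive buf (rq_rcvsize) ]
 */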
/*
* 3. Encode arguments of an RPC call
*/
static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
kxdreproc_t encode;
__be32 *p;
dprint_status(task);
rpc_xdr_buf_init(&req->rq_snd_buf,
req->rq_buffer,
req->rq_callsize);
rpc_xdr_buf_init(&req->rq_rcv_buf,
(char *)req->rq_buffer + req->rq_callsize,
req->rq_rcvsize);
p = rpc_encode_header(task);
if (p == NULL) {
printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
rpc_exit(task, -EIO);
return;
}
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
return;
task->tk_status = rpcauth_wrap_req(task, encode, req, p,
task->tk_msg.rpc_argp);
}
/*
* 4. Get the server port number if not yet set
*/
static void
call_bind(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
dprint_status(task);
task->tk_action = call_connect;
if (!xprt_bound(xprt)) {
task->tk_action = call_bind_status;
task->tk_timeout = xprt->bind_timeout;
xprt->ops->rpcbind(task);
}
}
/*
* 4a. Sort out bind result
*/
static void
call_bind_status(struct rpc_task *task)
{
int status = -EIO;
if (task->tk_status >= 0) {
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_connect;
return;
}
switch (task->tk_status) {
case -ENOMEM:
dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
rpc_delay(task, HZ >> 2);
goto retry_timeout;
case -EACCES:
dprintk("RPC: %5u remote rpcbind: RPC program/version "
"unavailable\n", task->tk_pid);
/* fail immediately if this is an RPC ping */
if (task->tk_msg.rpc_proc->p_proc == 0) {
status = -EOPNOTSUPP;
break;
}
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ETIMEDOUT:
dprintk("RPC: %5u rpcbind request timed out\n",
task->tk_pid);
goto retry_timeout;
case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */
dprintk("RPC: %5u unrecognized remote rpcbind service\n",
task->tk_pid);
break;
case -EPROTONOSUPPORT:
dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
task->tk_pid);
task->tk_status = 0;
task->tk_action = call_bind;
return;
case -ECONNREFUSED: /* connection problems */
case -ECONNRESET:
case -ENOTCONN:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -EPIPE:
dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
task->tk_pid, task->tk_status);
if (!RPC_IS_SOFTCONN(task)) {
rpc_delay(task, 5*HZ);
goto retry_timeout;
}
status = task->tk_status;
break;
default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
task->tk_pid, -task->tk_status);
}
rpc_exit(task, status);
return;
retry_timeout:
task->tk_action = call_timeout;
}
/*
* 4b. Connect to the RPC server
*/
static void
call_connect(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
dprintk("RPC: %5u call_connect xprt %p %s connected\n",
task->tk_pid, xprt,
(xprt_connected(xprt) ? "is" : "is not"));
task->tk_action = call_transmit;
if (!xprt_connected(xprt)) {
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
xprt_connect(task);
}
}
/*
* 4c. Sort out connect result
*/
static void
call_connect_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
dprint_status(task);
task->tk_status = 0;
if (status >= 0 || status == -EAGAIN) {
clnt->cl_stats->netreconn++;
task->tk_action = call_transmit;
return;
}
switch (status) {
/* if soft mounted, test if we've timed out */
case -ETIMEDOUT:
task->tk_action = call_timeout;
break;
default:
rpc_exit(task, -EIO);
}
}
/*
* 5. Transmit the RPC request, and wait for reply
*/
static void
call_transmit(struct rpc_task *task)
{
dprint_status(task);
task->tk_action = call_status;
if (task->tk_status < 0)
return;
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status != 0)
return;
task->tk_action = call_transmit_status;
/* Encode here so that rpcsec_gss can use correct sequence number. */
if (rpc_task_need_encode(task)) {
BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
/* Was the error nonfatal? */
if (task->tk_status == -EAGAIN)
rpc_delay(task, HZ >> 4);
else
rpc_exit(task, task->tk_status);
return;
}
}
xprt_transmit(task);
if (task->tk_status < 0)
return;
/*
* On success, ensure that we call xprt_end_transmit() before sleeping
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
/*
* 5a. Handle cleanup after a transmission
*/
static void
call_transmit_status(struct rpc_task *task)
{
task->tk_action = call_status;
/*
* Common case: success. Force the compiler to put this
* test first.
*/
if (task->tk_status == 0) {
xprt_end_transmit(task);
rpc_task_force_reencode(task);
return;
}
switch (task->tk_status) {
case -EAGAIN:
break;
default:
dprint_status(task);
xprt_end_transmit(task);
rpc_task_force_reencode(task);
break;
/*
* Special cases: if we've been waiting on the
* socket's write_space() callback, or if the
* socket just returned a connection error,
* then hold onto the transport lock.
*/
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
if (RPC_IS_SOFTCONN(task)) {
xprt_end_transmit(task);
rpc_exit(task, task->tk_status);
break;
}
/* fall through */
case -ECONNRESET:
case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
}
}
#if defined(CONFIG_NFS_V4_1)
/*
* 5b. Send the backchannel RPC reply. On error, drop the reply. In
* addition, disconnect on connectivity errors.
*/
static void
call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
BUG_ON(task->tk_status != 0);
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status == -EAGAIN) {
/*
* Could not reserve the transport. Try again after the
* transport is released.
*/
task->tk_status = 0;
task->tk_action = call_bc_transmit;
return;
}
task->tk_action = rpc_exit_task;
if (task->tk_status < 0) {
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
return;
}
xprt_transmit(task);
xprt_end_transmit(task);
dprint_status(task);
switch (task->tk_status) {
case 0:
/* Success */
break;
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
* forechannel reestablish the connection. The server will
* have to retransmit the backchannel request and we'll
* reprocess it. Since these ops are idempotent, there's no
* need to cache our reply at this time.
*/
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
default:
/*
* We were unable to reply and will have to drop the
* request. The server should reconnect and retransmit.
*/
BUG_ON(task->tk_status == -EAGAIN);
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
break;
}
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */
/*
* 6. Sort out the RPC call status
*/
static void
call_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
int status;
if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
status = task->tk_status;
if (status >= 0) {
task->tk_action = call_decode;
return;
}
task->tk_status = 0;
switch(status) {
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
/*
* Delay any retries for 3 seconds, then handle as if it
* were a timeout.
*/
rpc_delay(task, 3*HZ);
/* fall through */
case -ETIMEDOUT:
task->tk_action = call_timeout;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
case -ECONNRESET:
case -ECONNREFUSED:
rpc_force_rebind(clnt);
rpc_delay(task, 3*HZ);
/* fall through */
case -EPIPE:
case -ENOTCONN:
task->tk_action = call_bind;
break;
case -EAGAIN:
task->tk_action = call_transmit;
break;
case -EIO:
/* shutdown or soft timeout */
rpc_exit(task, status);
break;
default:
if (clnt->cl_chatty)
printk("%s: RPC call returned error %d\n",
clnt->cl_protname, -status);
rpc_exit(task, status);
}
}
/*
* 6a. Handle RPC timeout
* We do not release the request slot, so we keep using the
* same XID for all retransmits.
*/
static void
call_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
goto retry;
}
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
if (RPC_IS_SOFTCONN(task)) {
rpc_exit(task, -ETIMEDOUT);
return;
}
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname, clnt->cl_server);
if (task->tk_flags & RPC_TASK_TIMEOUT)
rpc_exit(task, -ETIMEDOUT);
else
rpc_exit(task, -EIO);
return;
}
if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
task->tk_flags |= RPC_CALL_MAJORSEEN;
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
clnt->cl_protname, clnt->cl_server);
}
rpc_force_rebind(clnt);
/*
* Did our request time out due to an RPCSEC_GSS out-of-sequence
* event? RFC2203 requires the server to drop all such requests.
*/
rpcauth_invalcred(task);
retry:
clnt->cl_stats->rpcretrans++;
task->tk_action = call_bind;
task->tk_status = 0;
}
/*
* 7. Decode the RPC reply
*/
static void
call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %5u call_decode (status %d)\n",
task->tk_pid, task->tk_status);
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s OK\n",
clnt->cl_protname, clnt->cl_server);
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
}
/*
* Ensure that we see all writes made by xprt_complete_rqst()
* before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
/* Check that the softirq receive buffer is valid */
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
sizeof(req->rq_rcv_buf)) != 0);
if (req->rq_rcv_buf.len < 12) {	/* xid + direction + reply_stat = 3 words */
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
clnt->cl_stats->rpcretrans++;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
clnt->cl_protname, task->tk_status);
task->tk_action = call_timeout;
goto out_retry;
}
p = rpc_verify_header(task);
if (IS_ERR(p)) {
if (p == ERR_PTR(-EAGAIN))
goto out_retry;
return;
}
task->tk_action = rpc_exit_task;
if (decode) {
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
task->tk_msg.rpc_resp);
}
dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
task->tk_status);
return;
out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
}
}
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
__be32 *p = req->rq_svec[0].iov_base;
/* FIXME: check buffer size? */
p = xprt_skip_transport_header(task->tk_xprt, p);
*p++ = req->rq_xid; /* XID */
*p++ = htonl(RPC_CALL); /* CALL */
*p++ = htonl(RPC_VERSION); /* RPC version */
*p++ = htonl(clnt->cl_prog); /* program number */
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
}
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
__be32 *p = iov->iov_base;
u32 n;
int error = -EACCES;
if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
/* RFC-1014 says that the representation of XDR data must be a
* multiple of four bytes
* - if it isn't pointer subtraction in the NFS client may give
* undefined results
*/
dprintk("RPC: %5u %s: XDR representation not a multiple of"
" 4 bytes: 0x%x\n", task->tk_pid, __func__,
task->tk_rqstp->rq_rcv_buf.len);
goto out_eio;
}
if ((len -= 3) < 0)
goto out_overflow;
p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
task->tk_pid, __func__, n);
goto out_garbage;
}
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_ERROR:
break;
case RPC_MISMATCH:
dprintk("RPC: %5u %s: RPC call version "
"mismatch!\n",
task->tk_pid, __func__);
error = -EPROTONOSUPPORT;
goto out_err;
default:
dprintk("RPC: %5u %s: RPC call rejected, "
"unknown error: %x\n",
task->tk_pid, __func__, n);
goto out_eio;
}
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_REJECTEDCRED:
case RPC_AUTH_REJECTEDVERF:
case RPCSEC_GSS_CREDPROBLEM:
case RPCSEC_GSS_CTXPROBLEM:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry stale creds\n",
task->tk_pid, __func__);
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
task->tk_action = call_reserve;
goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
/* possibly garbled cred/verf? */
if (!task->tk_garb_retry)
break;
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retry garbled creds\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "RPC: server %s requires stronger "
"authentication.\n", task->tk_client->cl_server);
break;
default:
dprintk("RPC: %5u %s: unknown auth error: %x\n",
task->tk_pid, __func__, n);
error = -EIO;
}
dprintk("RPC: %5u %s: call rejected %d\n",
task->tk_pid, __func__, n);
goto out_err;
}
if (!(p = rpcauth_checkverf(task, p))) {
dprintk("RPC: %5u %s: auth check failed\n",
task->tk_pid, __func__);
goto out_garbage; /* bad verifier, retry */
}
len = p - (__be32 *)iov->iov_base - 1;
if (len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_SUCCESS:
return p;
case RPC_PROG_UNAVAIL:
dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
task->tk_pid, __func__,
(unsigned int)task->tk_client->cl_prog,
task->tk_client->cl_server);
error = -EPFNOSUPPORT;
goto out_err;
case RPC_PROG_MISMATCH:
dprintk("RPC: %5u %s: program %u, version %u unsupported by "
"server %s\n", task->tk_pid, __func__,
(unsigned int)task->tk_client->cl_prog,
(unsigned int)task->tk_client->cl_vers,
task->tk_client->cl_server);
error = -EPROTONOSUPPORT;
goto out_err;
case RPC_PROC_UNAVAIL:
dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
"version %u on server %s\n",
task->tk_pid, __func__,
rpc_proc_name(task),
task->tk_client->cl_prog,
task->tk_client->cl_vers,
task->tk_client->cl_server);
error = -EOPNOTSUPP;
goto out_err;
case RPC_GARBAGE_ARGS:
dprintk("RPC: %5u %s: server saw garbage\n",
task->tk_pid, __func__);
break; /* retry */
default:
dprintk("RPC: %5u %s: server accept status: %x\n",
task->tk_pid, __func__, n);
/* Also retry */
}
out_garbage:
task->tk_client->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retrying\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
out_retry:
return ERR_PTR(-EAGAIN);
}
out_eio:
error = -EIO;
out_err:
rpc_exit(task, error);
dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
__func__, error);
return ERR_PTR(error);
out_overflow:
dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
__func__);
goto out_garbage;
}
static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
}
static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
return 0;
}
static struct rpc_procinfo rpcproc_null = {
.p_encode = rpcproc_encode_null,
.p_decode = rpcproc_decode_null,
};
static int rpc_ping(struct rpc_clnt *clnt)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
};
int err;
msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
put_rpccred(msg.rpc_cred);
return err;
}
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = &msg,
.callback_ops = &rpc_default_ops,
.flags = flags,
};
return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
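/*
 * Illustrative sketch only, not part of clnt.c: probing a server with an
 * async NULL call and discarding the result. Passing a NULL cred lets a
 * credential be bound during call_refresh.
 */
static int example_probe_server(struct rpc_clnt *clnt)
{
	struct rpc_task *task;

	task = rpc_call_null(clnt, NULL, RPC_TASK_SOFT | RPC_TASK_ASYNC);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);	/* we don't wait for completion */
	return 0;
}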
#ifdef RPC_DEBUG
static void rpc_show_header(void)
{
printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
"-timeout ---ops--\n");
}
static void rpc_show_task(const struct rpc_clnt *clnt,
const struct rpc_task *task)
{
const char *rpc_waitq = "none";
if (RPC_IS_QUEUED(task))
rpc_waitq = rpc_qname(task->tk_waitqueue);
printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
task->tk_pid, task->tk_flags, task->tk_status,
clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
task->tk_action, rpc_waitq);
}
void rpc_show_tasks(void)
{
struct rpc_clnt *clnt;
struct rpc_task *task;
int header = 0;
spin_lock(&rpc_client_lock);
list_for_each_entry(clnt, &all_clients, cl_clients) {
spin_lock(&clnt->cl_lock);
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
if (!header) {
rpc_show_header();
header++;
}
rpc_show_task(clnt, task);
}
spin_unlock(&clnt->cl_lock);
}
spin_unlock(&rpc_client_lock);
}
#endif
| /*
* linux/net/sunrpc/clnt.c
*
* This file contains the high-level RPC interface.
* It is modeled as a finite state machine to support both synchronous
* and asynchronous requests.
*
* - RPC header generation and argument serialization.
* - Credential refresh.
* - TCP connect handling.
* - Retry of operation when it is suspected the operation failed because
* of uid squashing on the server, or when the credentials were stale
* and need to be refreshed, or when a packet was damaged in transit.
* This may have to be moved to the VFS layer.
*
* Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
* Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
#endif
#define dprint_status(t) \
dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \
__func__, t->tk_status)
/*
* All RPC clients are linked into this list
*/
static LIST_HEAD(all_clients);
static DEFINE_SPINLOCK(rpc_client_lock);
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
#if defined(CONFIG_NFS_V4_1)
static void call_bc_transmit(struct rpc_task *task);
#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_client_lock);
list_add(&clnt->cl_clients, &all_clients);
spin_unlock(&rpc_client_lock);
}
static void rpc_unregister_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_client_lock);
list_del(&clnt->cl_clients);
spin_unlock(&rpc_client_lock);
}
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
static uint32_t clntid;
struct nameidata nd;
struct path path;
char name[15];
struct qstr q = {
.name = name,
};
int error;
clnt->cl_path.mnt = ERR_PTR(-ENOENT);
clnt->cl_path.dentry = ERR_PTR(-ENOENT);
if (dir_name == NULL)
return 0;
path.mnt = rpc_get_mount();
if (IS_ERR(path.mnt))
return PTR_ERR(path.mnt);
error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
if (error)
goto err;
for (;;) {
q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
name[sizeof(name) - 1] = '\0';
q.hash = full_name_hash(q.name, q.len);
path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
if (!IS_ERR(path.dentry))
break;
error = PTR_ERR(path.dentry);
if (error != -EEXIST) {
printk(KERN_INFO "RPC: Couldn't create pipefs entry"
" %s/%s, error %d\n",
dir_name, name, error);
goto err_path_put;
}
}
path_put(&nd.path);
clnt->cl_path = path;
return 0;
err_path_put:
path_put(&nd.path);
err:
rpc_put_mount();
return error;
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
struct rpc_program *program = args->program;
struct rpc_version *version;
struct rpc_clnt *clnt = NULL;
struct rpc_auth *auth;
int err;
size_t len;
/* sanity check the name before trying to print it */
err = -EINVAL;
len = strlen(args->servername);
if (len > RPC_MAXNETNAMELEN)
goto out_no_rpciod;
len++;
dprintk("RPC: creating %s client for %s (xprt %p)\n",
program->name, args->servername, xprt);
err = rpciod_up();
if (err)
goto out_no_rpciod;
err = -EINVAL;
if (!xprt)
goto out_no_xprt;
if (args->version >= program->nrvers)
goto out_err;
version = program->version[args->version];
if (version == NULL)
goto out_err;
err = -ENOMEM;
clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
goto out_err;
clnt->cl_parent = clnt;
clnt->cl_server = clnt->cl_inline_name;
if (len > sizeof(clnt->cl_inline_name)) {
char *buf = kmalloc(len, GFP_KERNEL);
if (buf != NULL)
clnt->cl_server = buf;
else
len = sizeof(clnt->cl_inline_name);
}
strlcpy(clnt->cl_server, args->servername, len);
clnt->cl_xprt = xprt;
clnt->cl_procinfo = version->procs;
clnt->cl_maxproc = version->nrprocs;
clnt->cl_protname = program->name;
clnt->cl_prog = args->prognumber ? : program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
clnt->cl_metrics = rpc_alloc_iostats(clnt);
err = -ENOMEM;
if (clnt->cl_metrics == NULL)
goto out_no_stats;
clnt->cl_program = program;
INIT_LIST_HEAD(&clnt->cl_tasks);
spin_lock_init(&clnt->cl_lock);
if (!xprt_bound(clnt->cl_xprt))
clnt->cl_autobind = 1;
clnt->cl_timeout = xprt->timeout;
if (args->timeout != NULL) {
memcpy(&clnt->cl_timeout_default, args->timeout,
sizeof(clnt->cl_timeout_default));
clnt->cl_timeout = &clnt->cl_timeout_default;
}
clnt->cl_rtt = &clnt->cl_rtt_default;
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
clnt->cl_principal = NULL;
if (args->client_name) {
clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
if (!clnt->cl_principal)
goto out_no_principal;
}
atomic_set(&clnt->cl_count, 1);
err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
if (err < 0)
goto out_no_path;
auth = rpcauth_create(args->authflavor, clnt);
if (IS_ERR(auth)) {
printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
args->authflavor);
err = PTR_ERR(auth);
goto out_no_auth;
}
/* save the nodename */
clnt->cl_nodelen = strlen(init_utsname()->nodename);
if (clnt->cl_nodelen > UNX_MAXNODENAME)
clnt->cl_nodelen = UNX_MAXNODENAME;
memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
rpc_register_client(clnt);
return clnt;
out_no_auth:
if (!IS_ERR(clnt->cl_path.dentry)) {
rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
out_no_path:
kfree(clnt->cl_principal);
out_no_principal:
rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
if (clnt->cl_server != clnt->cl_inline_name)
kfree(clnt->cl_server);
kfree(clnt);
out_err:
xprt_put(xprt);
out_no_xprt:
rpciod_down();
out_no_rpciod:
return ERR_PTR(err);
}
/**
* rpc_create - create an RPC client and transport with one call
* @args: rpc_clnt create argument structure
*
* Creates and initializes an RPC transport and an RPC client.
*
* It can ping the server in order to determine if it is up, and to see if
* it supports this program and version. RPC_CLNT_CREATE_NOPING disables
* this behavior so asynchronous tasks can also use rpc_create.
*/
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
struct rpc_xprt *xprt;
struct rpc_clnt *clnt;
struct xprt_create xprtargs = {
.net = args->net,
.ident = args->protocol,
.srcaddr = args->saddress,
.dstaddr = args->address,
.addrlen = args->addrsize,
.bc_xprt = args->bc_xprt,
};
char servername[48];
/*
* If the caller chooses not to specify a hostname, whip
* up a string representation of the passed-in address.
*/
if (args->servername == NULL) {
struct sockaddr_un *sun =
(struct sockaddr_un *)args->address;
struct sockaddr_in *sin =
(struct sockaddr_in *)args->address;
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)args->address;
servername[0] = '\0';
switch (args->address->sa_family) {
case AF_LOCAL:
snprintf(servername, sizeof(servername), "%s",
sun->sun_path);
break;
case AF_INET:
snprintf(servername, sizeof(servername), "%pI4",
&sin->sin_addr.s_addr);
break;
case AF_INET6:
snprintf(servername, sizeof(servername), "%pI6",
&sin6->sin6_addr);
break;
default:
/* caller wants default server name, but
* address family isn't recognized. */
return ERR_PTR(-EINVAL);
}
args->servername = servername;
}
xprt = xprt_create_transport(&xprtargs);
if (IS_ERR(xprt))
return (struct rpc_clnt *)xprt;
/*
* By default, kernel RPC client connects from a reserved port.
* CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
* but it is always enabled for rpciod, which handles the connect
* operation.
*/
xprt->resvport = 1;
if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
xprt->resvport = 0;
clnt = rpc_new_client(args, xprt);
if (IS_ERR(clnt))
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
int err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
}
}
clnt->cl_softrtry = 1;
if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
clnt->cl_softrtry = 0;
if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
clnt->cl_autobind = 1;
if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
clnt->cl_discrtry = 1;
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1;
return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
/*
* This function clones the RPC client structure. It allows us to share the
* same transport while varying parameters such as the authentication
* flavour.
*/
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
struct rpc_clnt *new;
int err = -ENOMEM;
new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
if (!new)
goto out_no_clnt;
new->cl_parent = clnt;
/* Turn off autobind on clones */
new->cl_autobind = 0;
INIT_LIST_HEAD(&new->cl_tasks);
spin_lock_init(&new->cl_lock);
rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
new->cl_metrics = rpc_alloc_iostats(clnt);
if (new->cl_metrics == NULL)
goto out_no_stats;
if (clnt->cl_principal) {
new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL);
if (new->cl_principal == NULL)
goto out_no_principal;
}
atomic_set(&new->cl_count, 1);
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
xprt_get(clnt->cl_xprt);
atomic_inc(&clnt->cl_count);
rpc_register_client(new);
rpciod_up();
return new;
out_no_path:
kfree(new->cl_principal);
out_no_principal:
rpc_free_iostats(new->cl_metrics);
out_no_stats:
kfree(new);
out_no_clnt:
dprintk("RPC: %s: returned error %d\n", __func__, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);
/*
* Kill all tasks for the given client.
* XXX: kill their descendants as well?
*/
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
struct rpc_task *rovr;
if (list_empty(&clnt->cl_tasks))
return;
dprintk("RPC: killing all tasks for client %p\n", clnt);
/*
 * Hold clnt->cl_lock so the client's task list cannot change under us...
 */
spin_lock(&clnt->cl_lock);
list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
if (!RPC_IS_ACTIVATED(rovr))
continue;
if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
if (RPC_IS_QUEUED(rovr))
rpc_wake_up_queued_task(rovr->tk_waitqueue,
rovr);
}
}
spin_unlock(&clnt->cl_lock);
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
/*
* Properly shut down an RPC client, terminating all outstanding
* requests.
*/
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
dprintk("RPC: shutting down %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
while (!list_empty(&clnt->cl_tasks)) {
rpc_killall_tasks(clnt);
wait_event_timeout(destroy_wait,
list_empty(&clnt->cl_tasks), 1*HZ);
}
rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);
/*
* Free an RPC client
*/
static void
rpc_free_client(struct rpc_clnt *clnt)
{
dprintk("RPC: destroying %s client for %s\n",
clnt->cl_protname, clnt->cl_server);
if (!IS_ERR(clnt->cl_path.dentry)) {
rpc_remove_client_dir(clnt->cl_path.dentry);
rpc_put_mount();
}
if (clnt->cl_parent != clnt) {
rpc_release_client(clnt->cl_parent);
goto out_free;
}
if (clnt->cl_server != clnt->cl_inline_name)
kfree(clnt->cl_server);
out_free:
rpc_unregister_client(clnt);
rpc_free_iostats(clnt->cl_metrics);
kfree(clnt->cl_principal);
clnt->cl_metrics = NULL;
xprt_put(clnt->cl_xprt);
rpciod_down();
kfree(clnt);
}
/*
* Release the RPC client's authentication handle, then free the client itself
*/
static void
rpc_free_auth(struct rpc_clnt *clnt)
{
if (clnt->cl_auth == NULL) {
rpc_free_client(clnt);
return;
}
/*
* Note: RPCSEC_GSS may need to send NULL RPC calls in order to
* release remaining GSS contexts. This mechanism ensures
* that it can do so safely.
*/
atomic_inc(&clnt->cl_count);
rpcauth_release(clnt->cl_auth);
clnt->cl_auth = NULL;
if (atomic_dec_and_test(&clnt->cl_count))
rpc_free_client(clnt);
}
/*
* Release reference to the RPC client
*/
void
rpc_release_client(struct rpc_clnt *clnt)
{
dprintk("RPC: rpc_release_client(%p)\n", clnt);
if (list_empty(&clnt->cl_tasks))
wake_up(&destroy_wait);
if (atomic_dec_and_test(&clnt->cl_count))
rpc_free_auth(clnt);
}
/**
* rpc_bind_new_program - bind a new RPC program to an existing client
* @old: old rpc_client
* @program: rpc program to set
* @vers: rpc program version
*
* Clones the rpc client and sets up a new RPC program. This is mainly
* of use for enabling different RPC programs to share the same transport.
* The Sun NFSv2/v3 ACL protocol can do this.
*/
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
struct rpc_program *program,
u32 vers)
{
struct rpc_clnt *clnt;
struct rpc_version *version;
int err;
BUG_ON(vers >= program->nrvers || !program->version[vers]);
version = program->version[vers];
clnt = rpc_clone_client(old);
if (IS_ERR(clnt))
goto out;
clnt->cl_procinfo = version->procs;
clnt->cl_maxproc = version->nrprocs;
clnt->cl_protname = program->name;
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
}
out:
return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
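/*
 * Editor's note: illustrative sketch, not part of the original file.
 * rpc_bind_new_program() is how a side protocol (for instance the Sun
 * NFSv2/v3 ACL protocol mentioned above) reuses the transport of an
 * existing client.  example_program and the version number 3 are
 * placeholders, not real definitions; the version must exist in
 * example_program->version[].
 */
static struct rpc_clnt *example_bind_side_program(struct rpc_clnt *main_clnt,
						  struct rpc_program *example_program)
{
	return rpc_bind_new_program(main_clnt, example_program, 3);
}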
void rpc_task_release_client(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
if (clnt != NULL) {
/* Remove from client task list */
spin_lock(&clnt->cl_lock);
list_del(&task->tk_task);
spin_unlock(&clnt->cl_lock);
task->tk_client = NULL;
rpc_release_client(clnt);
}
}
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
if (clnt != NULL) {
rpc_task_release_client(task);
task->tk_client = clnt;
atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
/* Add to the client's list of all tasks */
spin_lock(&clnt->cl_lock);
list_add_tail(&task->tk_task, &clnt->cl_tasks);
spin_unlock(&clnt->cl_lock);
}
}
void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
rpc_task_release_client(task);
rpc_task_set_client(task, clnt);
}
EXPORT_SYMBOL_GPL(rpc_task_reset_client);
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
if (msg != NULL) {
task->tk_msg.rpc_proc = msg->rpc_proc;
task->tk_msg.rpc_argp = msg->rpc_argp;
task->tk_msg.rpc_resp = msg->rpc_resp;
if (msg->rpc_cred != NULL)
task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
}
}
/*
* Default callback for async RPC calls
*/
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}
static const struct rpc_call_ops rpc_default_ops = {
.rpc_call_done = rpc_default_callback,
};
/**
* rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
* @task_setup_data: pointer to task initialisation data
*/
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
struct rpc_task *task;
task = rpc_new_task(task_setup_data);
if (IS_ERR(task))
goto out;
rpc_task_set_client(task, task_setup_data->rpc_client);
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
if (task->tk_action == NULL)
rpc_call_start(task);
atomic_inc(&task->tk_count);
rpc_execute(task);
out:
return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);
/**
* rpc_call_sync - Perform a synchronous RPC call
* @clnt: pointer to RPC client
* @msg: RPC call parameters
* @flags: RPC call flags
*/
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = &rpc_default_ops,
.flags = flags,
};
int status;
BUG_ON(flags & RPC_TASK_ASYNC);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
status = task->tk_status;
rpc_put_task(task);
return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);
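/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Minimal synchronous call: fill in an rpc_message and hand it to
 * rpc_call_sync().  This mirrors rpc_ping() further down in this file,
 * using a NULL-style procedure so that no argument or result encoding
 * is needed; example_proc_null stands in for a real rpc_procinfo entry.
 */
static int example_sync_null_call(struct rpc_clnt *clnt,
				  struct rpc_procinfo *example_proc_null)
{
	struct rpc_message msg = {
		.rpc_proc = example_proc_null,
	};
	/* Soft semantics: time out instead of retrying forever */
	return rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
}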
/**
* rpc_call_async - Perform an asynchronous RPC call
* @clnt: pointer to RPC client
* @msg: RPC call parameters
* @flags: RPC call flags
* @tk_ops: RPC call ops
* @data: user call data
*/
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
const struct rpc_call_ops *tk_ops, void *data)
{
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = msg,
.callback_ops = tk_ops,
.callback_data = data,
.flags = flags|RPC_TASK_ASYNC,
};
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);
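/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Asynchronous variant: rpc_call_async() queues the task and returns
 * immediately; the rpc_call_done callback later runs from rpciod, and
 * rpc_release frees the caller's private data.  example_done() and
 * example_release() are placeholders for real callbacks.
 */
static void example_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status < 0)
		printk(KERN_DEBUG "example async call failed: %d\n",
				task->tk_status);
}
static void example_release(void *calldata)
{
	kfree(calldata);
}
static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_done,
	.rpc_release	= example_release,
};
static int example_async_call(struct rpc_clnt *clnt,
			      const struct rpc_message *msg, void *calldata)
{
	return rpc_call_async(clnt, msg, RPC_TASK_SOFT,
			      &example_call_ops, calldata);
}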
#if defined(CONFIG_NFS_V4_1)
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
* @req: RPC request
* @tk_ops: RPC call ops
*/
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
const struct rpc_call_ops *tk_ops)
{
struct rpc_task *task;
struct xdr_buf *xbufp = &req->rq_snd_buf;
struct rpc_task_setup task_setup_data = {
.callback_ops = tk_ops,
};
dprintk("RPC: rpc_run_bc_task req= %p\n", req);
/*
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
if (IS_ERR(task)) {
xprt_free_bc_request(req);
goto out;
}
task->tk_rqstp = req;
/*
* Set up the xdr_buf length.
* This also indicates that the buffer is XDR encoded already.
*/
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
xbufp->tail[0].iov_len;
task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count);
BUG_ON(atomic_read(&task->tk_count) != 2);
rpc_execute(task);
out:
dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
return task;
}
#endif /* CONFIG_NFS_V4_1 */
void
rpc_call_start(struct rpc_task *task)
{
task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);
/**
* rpc_peeraddr - extract remote peer address from clnt's xprt
* @clnt: RPC client structure
* @buf: target buffer
* @bufsize: length of target buffer
*
* Returns the number of bytes that are actually in the stored address.
*/
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
size_t bytes;
struct rpc_xprt *xprt = clnt->cl_xprt;
bytes = sizeof(xprt->addr);
if (bytes > bufsize)
bytes = bufsize;
memcpy(buf, &clnt->cl_xprt->addr, bytes);
return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);
/**
* rpc_peeraddr2str - return remote peer address in printable format
* @clnt: RPC client structure
* @format: address format
*
*/
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
enum rpc_display_format_t format)
{
struct rpc_xprt *xprt = clnt->cl_xprt;
if (xprt->address_strings[format] != NULL)
return xprt->address_strings[format];
else
return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
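/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Typical use of rpc_peeraddr2str(): fetch pre-formatted strings for
 * logging.  The returned pointers are owned by the transport, so they
 * must not be freed and stay valid only while the client pins it.
 */
static void example_log_server_addr(struct rpc_clnt *clnt)
{
	printk(KERN_DEBUG "RPC server is %s, port %s\n",
	       rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR),
	       rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
}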
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
struct rpc_xprt *xprt = clnt->cl_xprt;
if (xprt->ops->set_buffer_size)
xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);
/*
* Return size of largest payload RPC client can support, in bytes
*
* For stream transports, this is one RPC record fragment (see RFC
* 1831), as we don't support multi-record requests yet. For datagram
* transports, this is the size of an IP packet minus the IP, UDP, and
* RPC header sizes.
*/
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);
/**
* rpc_force_rebind - force transport to check that remote port is unchanged
* @clnt: client to rebind
*
*/
void rpc_force_rebind(struct rpc_clnt *clnt)
{
if (clnt->cl_autobind)
xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
* Restart an (async) RPC call from the call_prepare state.
* Usually called from within the exit handler.
*/
int
rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
task->tk_action = rpc_prepare_task;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
int
rpc_restart_call(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
task->tk_action = call_start;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
#ifdef RPC_DEBUG
static const char *rpc_proc_name(const struct rpc_task *task)
{
const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
if (proc) {
if (proc->p_name)
return proc->p_name;
else
return "NULL";
} else
return "no proc";
}
#endif
/*
* 0. Initial state
*
* Other FSM states can be visited zero or more times, but
* this state is visited exactly once for each RPC.
*/
static void
call_start(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
clnt->cl_protname, clnt->cl_vers,
rpc_proc_name(task),
(RPC_IS_ASYNC(task) ? "async" : "sync"));
/* Increment call count */
task->tk_msg.rpc_proc->p_count++;
clnt->cl_stats->rpccnt++;
task->tk_action = call_reserve;
}
/*
* 1. Reserve an RPC call slot
*/
static void
call_reserve(struct rpc_task *task)
{
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_reserveresult;
xprt_reserve(task);
}
/*
* 1b. Grok the result of xprt_reserve()
*/
static void
call_reserveresult(struct rpc_task *task)
{
int status = task->tk_status;
dprint_status(task);
/*
* After a call to xprt_reserve(), we must have either
* a request slot or else an error status.
*/
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
task->tk_action = call_refresh;
return;
}
printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
__func__, status);
rpc_exit(task, -EIO);
return;
}
/*
* Even though there was an error, we may have acquired
* a request slot somehow. Make sure not to leak it.
*/
if (task->tk_rqstp) {
printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
__func__, status);
xprt_release(task);
}
switch (status) {
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
case -EIO: /* probably a shutdown */
break;
default:
printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
__func__, status);
break;
}
rpc_exit(task, status);
}
/*
* 2. Bind and/or refresh the credentials
*/
static void
call_refresh(struct rpc_task *task)
{
dprint_status(task);
task->tk_action = call_refreshresult;
task->tk_status = 0;
task->tk_client->cl_stats->rpcauthrefresh++;
rpcauth_refreshcred(task);
}
/*
* 2a. Process the results of a credential refresh
*/
static void
call_refreshresult(struct rpc_task *task)
{
int status = task->tk_status;
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_refresh;
switch (status) {
case 0:
if (rpcauth_uptodatecred(task))
task->tk_action = call_allocate;
return;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
case -EAGAIN:
status = -EACCES;
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry refresh creds\n",
task->tk_pid, __func__);
return;
}
dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
task->tk_pid, __func__, status);
rpc_exit(task, status);
}
/*
* 2b. Allocate the buffer. For details, see sched.c:rpc_malloc.
* (Note: buffer memory is freed in xprt_release).
*/
static void
call_allocate(struct rpc_task *task)
{
unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_bind;
if (req->rq_buffer)
return;
if (proc->p_proc != 0) {
BUG_ON(proc->p_arglen == 0);
if (proc->p_decode != NULL)
BUG_ON(proc->p_replen == 0);
}
/*
* Calculate the size (in quads) of the RPC call
* and reply headers, and convert both values
* to byte sizes.
*/
req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
req->rq_callsize <<= 2;
req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
req->rq_rcvsize <<= 2;
req->rq_buffer = xprt->ops->buf_alloc(task,
req->rq_callsize + req->rq_rcvsize);
if (req->rq_buffer != NULL)
return;
dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
if (RPC_IS_ASYNC(task) || !signalled()) {
task->tk_action = call_allocate;
rpc_delay(task, HZ>>4);
return;
}
rpc_exit(task, -ERESTARTSYS);
}
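/*
 * Editor's note: worked example for the buffer sizing in call_allocate()
 * above; not part of the original file.  Sizes are computed in 32-bit
 * quads and shifted left by 2 to convert to bytes.  Assuming the usual
 * header sizes from msg_prot.h (RPC_CALLHDRSIZE = 6 quads,
 * RPC_REPHDRSIZE = 4 quads), an auth slack of 7 quads, p_arglen = 10
 * and p_replen = 20 quads:
 *
 *	rq_callsize = (6 + 2*7 + 10) << 2 = 30 quads = 120 bytes
 *	rq_rcvsize  = (4 +   7 + 20) << 2 = 31 quads = 124 bytes
 *
 * A single buffer of rq_callsize + rq_rcvsize bytes (244 here) is then
 * requested from the transport's buf_alloc method; the send buffer
 * occupies the first half, the receive buffer the second.
 */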
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
return task->tk_rqstp->rq_snd_buf.len == 0;
}
static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
task->tk_rqstp->rq_snd_buf.len = 0;
task->tk_rqstp->rq_bytes_sent = 0;
}
static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
buf->page_len = 0;
buf->flags = 0;
buf->len = 0;
buf->buflen = len;
}
/*
* 3. Encode arguments of an RPC call
*/
static void
rpc_xdr_encode(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
kxdreproc_t encode;
__be32 *p;
dprint_status(task);
rpc_xdr_buf_init(&req->rq_snd_buf,
req->rq_buffer,
req->rq_callsize);
rpc_xdr_buf_init(&req->rq_rcv_buf,
(char *)req->rq_buffer + req->rq_callsize,
req->rq_rcvsize);
p = rpc_encode_header(task);
if (p == NULL) {
printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
rpc_exit(task, -EIO);
return;
}
encode = task->tk_msg.rpc_proc->p_encode;
if (encode == NULL)
return;
task->tk_status = rpcauth_wrap_req(task, encode, req, p,
task->tk_msg.rpc_argp);
}
/*
* 4. Get the server port number if not yet set
*/
static void
call_bind(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
dprint_status(task);
task->tk_action = call_connect;
if (!xprt_bound(xprt)) {
task->tk_action = call_bind_status;
task->tk_timeout = xprt->bind_timeout;
xprt->ops->rpcbind(task);
}
}
/*
* 4a. Sort out bind result
*/
static void
call_bind_status(struct rpc_task *task)
{
int status = -EIO;
if (task->tk_status >= 0) {
dprint_status(task);
task->tk_status = 0;
task->tk_action = call_connect;
return;
}
switch (task->tk_status) {
case -ENOMEM:
dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
rpc_delay(task, HZ >> 2);
goto retry_timeout;
case -EACCES:
dprintk("RPC: %5u remote rpcbind: RPC program/version "
"unavailable\n", task->tk_pid);
/* fail immediately if this is an RPC ping */
if (task->tk_msg.rpc_proc->p_proc == 0) {
status = -EOPNOTSUPP;
break;
}
if (task->tk_rebind_retry == 0)
break;
task->tk_rebind_retry--;
rpc_delay(task, 3*HZ);
goto retry_timeout;
case -ETIMEDOUT:
dprintk("RPC: %5u rpcbind request timed out\n",
task->tk_pid);
goto retry_timeout;
case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */
dprintk("RPC: %5u unrecognized remote rpcbind service\n",
task->tk_pid);
break;
case -EPROTONOSUPPORT:
dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
task->tk_pid);
task->tk_status = 0;
task->tk_action = call_bind;
return;
case -ECONNREFUSED: /* connection problems */
case -ECONNRESET:
case -ENOTCONN:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -EPIPE:
dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
task->tk_pid, task->tk_status);
if (!RPC_IS_SOFTCONN(task)) {
rpc_delay(task, 5*HZ);
goto retry_timeout;
}
status = task->tk_status;
break;
default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
task->tk_pid, -task->tk_status);
}
rpc_exit(task, status);
return;
retry_timeout:
task->tk_action = call_timeout;
}
/*
* 4b. Connect to the RPC server
*/
static void
call_connect(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
dprintk("RPC: %5u call_connect xprt %p %s connected\n",
task->tk_pid, xprt,
(xprt_connected(xprt) ? "is" : "is not"));
task->tk_action = call_transmit;
if (!xprt_connected(xprt)) {
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
xprt_connect(task);
}
}
/*
* 4c. Sort out connect result
*/
static void
call_connect_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
dprint_status(task);
task->tk_status = 0;
if (status >= 0 || status == -EAGAIN) {
clnt->cl_stats->netreconn++;
task->tk_action = call_transmit;
return;
}
switch (status) {
/* if soft mounted, test if we've timed out */
case -ETIMEDOUT:
task->tk_action = call_timeout;
break;
default:
rpc_exit(task, -EIO);
}
}
/*
* 5. Transmit the RPC request, and wait for reply
*/
static void
call_transmit(struct rpc_task *task)
{
dprint_status(task);
task->tk_action = call_status;
if (task->tk_status < 0)
return;
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status != 0)
return;
task->tk_action = call_transmit_status;
/* Encode here so that rpcsec_gss can use correct sequence number. */
if (rpc_task_need_encode(task)) {
BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
rpc_xdr_encode(task);
/* Did the encode result in an error condition? */
if (task->tk_status != 0) {
/* Was the error nonfatal? */
if (task->tk_status == -EAGAIN)
rpc_delay(task, HZ >> 4);
else
rpc_exit(task, task->tk_status);
return;
}
}
xprt_transmit(task);
if (task->tk_status < 0)
return;
/*
* On success, ensure that we call xprt_end_transmit() before sleeping
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
/*
* 5a. Handle cleanup after a transmission
*/
static void
call_transmit_status(struct rpc_task *task)
{
task->tk_action = call_status;
/*
* Common case: success. Force the compiler to put this
* test first.
*/
if (task->tk_status == 0) {
xprt_end_transmit(task);
rpc_task_force_reencode(task);
return;
}
switch (task->tk_status) {
case -EAGAIN:
break;
default:
dprint_status(task);
xprt_end_transmit(task);
rpc_task_force_reencode(task);
break;
/*
* Special cases: if we've been waiting on the
* socket's write_space() callback, or if the
* socket just returned a connection error,
* then hold onto the transport lock.
*/
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
if (RPC_IS_SOFTCONN(task)) {
xprt_end_transmit(task);
rpc_exit(task, task->tk_status);
break;
}
case -ECONNRESET:
case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
}
}
#if defined(CONFIG_NFS_V4_1)
/*
* 5b. Send the backchannel RPC reply. On error, drop the reply. In
* addition, disconnect on connectivity errors.
*/
static void
call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
BUG_ON(task->tk_status != 0);
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status == -EAGAIN) {
/*
* Could not reserve the transport. Try again after the
* transport is released.
*/
task->tk_status = 0;
task->tk_action = call_bc_transmit;
return;
}
task->tk_action = rpc_exit_task;
if (task->tk_status < 0) {
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
return;
}
xprt_transmit(task);
xprt_end_transmit(task);
dprint_status(task);
switch (task->tk_status) {
case 0:
/* Success */
break;
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
* forechannel reestablish the connection. The server will
* have to retransmit the backchannel request and we'll
* reprocess it. Since these ops are idempotent, there's no
* need to cache our reply at this time.
*/
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
default:
/*
* We were unable to reply and will have to drop the
* request. The server should reconnect and retransmit.
*/
BUG_ON(task->tk_status == -EAGAIN);
printk(KERN_NOTICE "RPC: Could not send backchannel reply "
"error: %d\n", task->tk_status);
break;
}
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_NFS_V4_1 */
/*
* 6. Sort out the RPC call status
*/
static void
call_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
int status;
if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
status = task->tk_status;
if (status >= 0) {
task->tk_action = call_decode;
return;
}
task->tk_status = 0;
switch(status) {
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
/*
* Delay any retries for 3 seconds, then handle as if it
* were a timeout.
*/
rpc_delay(task, 3*HZ);
case -ETIMEDOUT:
task->tk_action = call_timeout;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
case -ECONNRESET:
case -ECONNREFUSED:
rpc_force_rebind(clnt);
rpc_delay(task, 3*HZ);
case -EPIPE:
case -ENOTCONN:
task->tk_action = call_bind;
break;
case -EAGAIN:
task->tk_action = call_transmit;
break;
case -EIO:
/* shutdown or soft timeout */
rpc_exit(task, status);
break;
default:
if (clnt->cl_chatty)
printk("%s: RPC call returned error %d\n",
clnt->cl_protname, -status);
rpc_exit(task, status);
}
}
/*
* 6a. Handle RPC timeout
* We do not release the request slot, so we keep using the
* same XID for all retransmits.
*/
static void
call_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
goto retry;
}
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
if (RPC_IS_SOFTCONN(task)) {
rpc_exit(task, -ETIMEDOUT);
return;
}
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname, clnt->cl_server);
if (task->tk_flags & RPC_TASK_TIMEOUT)
rpc_exit(task, -ETIMEDOUT);
else
rpc_exit(task, -EIO);
return;
}
if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
task->tk_flags |= RPC_CALL_MAJORSEEN;
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
clnt->cl_protname, clnt->cl_server);
}
rpc_force_rebind(clnt);
/*
* Did our request time out due to an RPCSEC_GSS out-of-sequence
* event? RFC2203 requires the server to drop all such requests.
*/
rpcauth_invalcred(task);
retry:
clnt->cl_stats->rpcretrans++;
task->tk_action = call_bind;
task->tk_status = 0;
}
/*
* 7. Decode the RPC reply
*/
static void
call_decode(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
dprintk("RPC: %5u call_decode (status %d)\n",
task->tk_pid, task->tk_status);
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s OK\n",
clnt->cl_protname, clnt->cl_server);
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
}
/*
* Ensure that we see all writes made by xprt_complete_rqst()
* before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
/* Check that the softirq receive buffer is valid */
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
sizeof(req->rq_rcv_buf)) != 0);
if (req->rq_rcv_buf.len < 12) {
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
clnt->cl_stats->rpcretrans++;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
clnt->cl_protname, task->tk_status);
task->tk_action = call_timeout;
goto out_retry;
}
p = rpc_verify_header(task);
if (IS_ERR(p)) {
if (p == ERR_PTR(-EAGAIN))
goto out_retry;
return;
}
task->tk_action = rpc_exit_task;
if (decode) {
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
task->tk_msg.rpc_resp);
}
dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
task->tk_status);
return;
out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
}
}
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
__be32 *p = req->rq_svec[0].iov_base;
/* FIXME: check buffer size? */
p = xprt_skip_transport_header(task->tk_xprt, p);
*p++ = req->rq_xid; /* XID */
*p++ = htonl(RPC_CALL); /* CALL */
*p++ = htonl(RPC_VERSION); /* RPC version */
*p++ = htonl(clnt->cl_prog); /* program number */
*p++ = htonl(clnt->cl_vers); /* program version */
*p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
p = rpcauth_marshcred(task, p);
req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
return p;
}
static __be32 *
rpc_verify_header(struct rpc_task *task)
{
struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
__be32 *p = iov->iov_base;
u32 n;
int error = -EACCES;
if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
/* RFC-1014 says that the representation of XDR data must be a
* multiple of four bytes
* - if it isn't pointer subtraction in the NFS client may give
* undefined results
*/
dprintk("RPC: %5u %s: XDR representation not a multiple of"
" 4 bytes: 0x%x\n", task->tk_pid, __func__,
task->tk_rqstp->rq_rcv_buf.len);
goto out_eio;
}
if ((len -= 3) < 0)
goto out_overflow;
p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
task->tk_pid, __func__, n);
goto out_garbage;
}
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_ERROR:
break;
case RPC_MISMATCH:
dprintk("RPC: %5u %s: RPC call version "
"mismatch!\n",
task->tk_pid, __func__);
error = -EPROTONOSUPPORT;
goto out_err;
default:
dprintk("RPC: %5u %s: RPC call rejected, "
"unknown error: %x\n",
task->tk_pid, __func__, n);
goto out_eio;
}
if (--len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_AUTH_REJECTEDCRED:
case RPC_AUTH_REJECTEDVERF:
case RPCSEC_GSS_CREDPROBLEM:
case RPCSEC_GSS_CTXPROBLEM:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
dprintk("RPC: %5u %s: retry stale creds\n",
task->tk_pid, __func__);
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
task->tk_action = call_reserve;
goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
/* possibly garbled cred/verf? */
if (!task->tk_garb_retry)
break;
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retry garbled creds\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "RPC: server %s requires stronger "
"authentication.\n", task->tk_client->cl_server);
break;
default:
dprintk("RPC: %5u %s: unknown auth error: %x\n",
task->tk_pid, __func__, n);
error = -EIO;
}
dprintk("RPC: %5u %s: call rejected %d\n",
task->tk_pid, __func__, n);
goto out_err;
}
if (!(p = rpcauth_checkverf(task, p))) {
dprintk("RPC: %5u %s: auth check failed\n",
task->tk_pid, __func__);
goto out_garbage; /* bad verifier, retry */
}
len = p - (__be32 *)iov->iov_base - 1;
if (len < 0)
goto out_overflow;
switch ((n = ntohl(*p++))) {
case RPC_SUCCESS:
return p;
case RPC_PROG_UNAVAIL:
dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
task->tk_pid, __func__,
(unsigned int)task->tk_client->cl_prog,
task->tk_client->cl_server);
error = -EPFNOSUPPORT;
goto out_err;
case RPC_PROG_MISMATCH:
dprintk("RPC: %5u %s: program %u, version %u unsupported by "
"server %s\n", task->tk_pid, __func__,
(unsigned int)task->tk_client->cl_prog,
(unsigned int)task->tk_client->cl_vers,
task->tk_client->cl_server);
error = -EPROTONOSUPPORT;
goto out_err;
case RPC_PROC_UNAVAIL:
dprintk("RPC: %5u %s: proc %s unsupported by program %u, "
"version %u on server %s\n",
task->tk_pid, __func__,
rpc_proc_name(task),
task->tk_client->cl_prog,
task->tk_client->cl_vers,
task->tk_client->cl_server);
error = -EOPNOTSUPP;
goto out_err;
case RPC_GARBAGE_ARGS:
dprintk("RPC: %5u %s: server saw garbage\n",
task->tk_pid, __func__);
break; /* retry */
default:
dprintk("RPC: %5u %s: server accept status: %x\n",
task->tk_pid, __func__, n);
/* Also retry */
}
out_garbage:
task->tk_client->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retrying\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
out_retry:
return ERR_PTR(-EAGAIN);
}
out_eio:
error = -EIO;
out_err:
rpc_exit(task, error);
dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid,
__func__, error);
return ERR_PTR(error);
out_overflow:
dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid,
__func__);
goto out_garbage;
}
static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
}
static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
{
return 0;
}
static struct rpc_procinfo rpcproc_null = {
.p_encode = rpcproc_encode_null,
.p_decode = rpcproc_decode_null,
};
static int rpc_ping(struct rpc_clnt *clnt)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
};
int err;
msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
put_rpccred(msg.rpc_cred);
return err;
}
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
.rpc_cred = cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
.rpc_message = &msg,
.callback_ops = &rpc_default_ops,
.flags = flags,
};
return rpc_run_task(&task_setup_data);
}
EXPORT_SYMBOL_GPL(rpc_call_null);
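/*
 * Editor's note: illustrative sketch, not part of the original file.
 * rpc_call_null() is the task-returning cousin of rpc_ping(): it sends
 * the NULL procedure and hands the rpc_task back to the caller, who is
 * responsible for dropping the reference with rpc_put_task().
 */
static int example_async_ping(struct rpc_clnt *clnt, struct rpc_cred *cred)
{
	struct rpc_task *task;
	task = rpc_call_null(clnt, cred, RPC_TASK_SOFT | RPC_TASK_ASYNC);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}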
#ifdef RPC_DEBUG
static void rpc_show_header(void)
{
printk(KERN_INFO "-pid- flgs status -client- --rqstp- "
"-timeout ---ops--\n");
}
static void rpc_show_task(const struct rpc_clnt *clnt,
const struct rpc_task *task)
{
const char *rpc_waitq = "none";
if (RPC_IS_QUEUED(task))
rpc_waitq = rpc_qname(task->tk_waitqueue);
printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
task->tk_pid, task->tk_flags, task->tk_status,
clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
task->tk_action, rpc_waitq);
}
void rpc_show_tasks(void)
{
struct rpc_clnt *clnt;
struct rpc_task *task;
int header = 0;
spin_lock(&rpc_client_lock);
list_for_each_entry(clnt, &all_clients, cl_clients) {
spin_lock(&clnt->cl_lock);
list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
if (!header) {
rpc_show_header();
header++;
}
rpc_show_task(clnt, task);
}
spin_unlock(&clnt->cl_lock);
}
spin_unlock(&rpc_client_lock);
}
#endif
|
167,671,788,213,458 | /*
* linux/net/sunrpc/sched.c
*
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
*
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sunrpc/clnt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
#endif
/*
* RPC slabs and memory pools
*/
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
struct workqueue_struct *rpciod_workqueue;
/*
* Disable the timer for a given RPC task. Should be called with
* queue->lock held and bottom halves disabled in order to avoid races within
* rpc_run_timer().
*/
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (task->tk_timeout == 0)
return;
dprintk("RPC: %5u disabling timer\n", task->tk_pid);
task->tk_timeout = 0;
list_del(&task->u.tk_wait.timer_list);
if (list_empty(&queue->timer_list.list))
del_timer(&queue->timer_list.timer);
}
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
queue->timer_list.expires = expires;
mod_timer(&queue->timer_list.timer, expires);
}
/*
* Set up a timer for the current task.
*/
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!task->tk_timeout)
return;
dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
task->u.tk_wait.expires = jiffies + task->tk_timeout;
if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
rpc_set_queue_timer(queue, task->u.tk_wait.expires);
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
/*
* Add new request to a priority queue.
*/
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
q = &queue->tasks[task->tk_priority];
if (unlikely(task->tk_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
}
/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
* This should avoid many nasty memory deadlocks and hopefully
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
BUG_ON (RPC_IS_QUEUED(task));
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task);
else if (RPC_IS_SWAPPER(task))
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
/*
* Remove request from a priority queue.
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
struct rpc_task *t;
if (!list_empty(&task->u.tk_wait.links)) {
t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
}
/*
* Remove request from queue.
* Note: must be called with spin lock held.
*/
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
queue->priority = priority;
queue->count = 1 << (priority * 2);
}
static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
rpc_set_waitqueue_owner(queue, 0);
}
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
spin_lock_init(&queue->lock);
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
INIT_LIST_HEAD(&queue->tasks[i]);
queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
queue->qlen = 0;
setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
queue->name = qname;
#endif
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
}
#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
static atomic_t rpc_pid;
task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
static void rpc_set_active(struct rpc_task *task)
{
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
/*
* Mark an RPC call as having completed by clearing the 'active' bit
* and then waking up all tasks that were sleeping.
*/
static int rpc_complete_task(struct rpc_task *task)
{
void *m = &task->tk_runstate;
wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
unsigned long flags;
int ret;
spin_lock_irqsave(&wq->lock, flags);
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
ret = atomic_dec_and_test(&task->tk_count);
if (waitqueue_active(wq))
__wake_up_locked_key(wq, TASK_NORMAL, &k);
spin_unlock_irqrestore(&wq->lock, flags);
return ret;
}
/*
* Allow callers to wait for completion of an RPC call
*
* Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
* to enforce taking of the wq->lock and hence avoid races with
* rpc_complete_task().
*/
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
if (action == NULL)
action = rpc_wait_bit_killable;
return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
rpc_clear_queued(task);
if (rpc_test_and_set_running(task))
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(rpciod_workqueue, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
* Prepare for sleeping on a wait queue.
* By always appending tasks to the list we ensure FIFO behavior.
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
__rpc_add_wait_queue(q, task);
BUG_ON(task->tk_callback != NULL);
task->tk_callback = action;
__rpc_add_timer(q, task);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
/* We shouldn't ever put an inactive task to sleep */
BUG_ON(!RPC_IS_ACTIVATED(task));
/*
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
__rpc_sleep_on(q, task, action);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
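/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The usual sleep/wake pairing: an FSM action sets tk_timeout and calls
 * rpc_sleep_on(); some other context later wakes the task with
 * rpc_wake_up_queued_task(), or the queue timer fires and wakes it with
 * -ETIMEDOUT.  example_queue stands for a queue initialised elsewhere
 * with rpc_init_wait_queue().
 */
static void example_wait_for_event(struct rpc_task *task,
				   struct rpc_wait_queue *example_queue)
{
	task->tk_timeout = 5 * HZ;		/* wake after 5s at the latest */
	rpc_sleep_on(example_queue, task, NULL);	/* no callback on wake-up */
}
static void example_event_arrived(struct rpc_task *task,
				  struct rpc_wait_queue *example_queue)
{
	rpc_wake_up_queued_task(example_queue, task);
}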
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
/* Has the task been executed yet? If not, we cannot wake it up! */
if (!RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
return;
}
__rpc_remove_wait_queue(queue, task);
rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
__rpc_do_wake_up_task(queue, task);
}
/*
* Tests whether rpc queue is empty
*/
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
int res;
spin_lock_bh(&queue->lock);
res = queue->qlen;
spin_unlock_bh(&queue->lock);
return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);
/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
struct list_head *q;
struct rpc_task *task;
/*
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
}
/*
* Check if we need to switch queues.
*/
if (--queue->count)
goto new_owner;
}
/*
* Service the next queue.
*/
do {
if (q == &queue->tasks[0])
q = &queue->tasks[queue->maxpriority];
else
q = q - 1;
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
rpc_reset_waitqueue_priority(queue);
return NULL;
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
rpc_wake_up_task_queue_locked(queue, task);
return task;
}
/*
* Wake up the next task on the wait queue.
*/
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
struct rpc_task *task = NULL;
dprintk("RPC: wake_up_next(%p \"%s\")\n",
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
if (RPC_IS_PRIORITY(queue))
task = __rpc_wake_up_next_priority(queue);
else {
task_for_first(task, &queue->tasks[0])
rpc_wake_up_task_queue_locked(queue, task);
}
spin_unlock_bh(&queue->lock);
return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
* rpc_wake_up - wake up all rpc_tasks
* @queue: rpc_wait_queue on which the tasks are sleeping
*
* Grabs queue->lock
*/
void rpc_wake_up(struct rpc_wait_queue *queue)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list)
rpc_wake_up_task_queue_locked(queue, task);
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
/**
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
* @queue: rpc_wait_queue on which the tasks are sleeping
* @status: status value to set
*
* Grabs queue->lock
*/
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
task->tk_status = status;
rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
static void __rpc_queue_timer_fn(unsigned long ptr)
{
struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
struct rpc_task *task, *n;
unsigned long expires, now, timeo;
spin_lock(&queue->lock);
expires = now = jiffies;
list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
timeo = task->u.tk_wait.expires;
if (time_after_eq(now, timeo)) {
dprintk("RPC: %5u timeout\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
rpc_wake_up_task_queue_locked(queue, task);
continue;
}
if (expires == now || time_after(expires, timeo))
expires = timeo;
}
if (!list_empty(&queue->timer_list.list))
rpc_set_queue_timer(queue, expires);
spin_unlock(&queue->lock);
}
static void __rpc_atrun(struct rpc_task *task)
{
task->tk_status = 0;
}
/*
* Run a task at a later time
*/
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
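/*
 * Editor's note: illustrative sketch, not part of the original file.
 * rpc_delay() is how FSM actions back off: park the task on the delay
 * queue for a while, then resume at whatever tk_action points to.  This
 * mirrors the retry pattern of call_status()/call_bind_status() in
 * clnt.c; example_retry_action is a placeholder for a real state.
 */
static void example_backoff_and_retry(struct rpc_task *task,
				      void (*example_retry_action)(struct rpc_task *))
{
	task->tk_action = example_retry_action;	/* state to re-enter after the delay */
	rpc_delay(task, 3 * HZ);		/* sleep roughly 3 seconds on delay_queue */
}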
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
void rpc_exit_task(struct rpc_task *task)
{
task->tk_action = NULL;
if (task->tk_ops->rpc_call_done != NULL) {
task->tk_ops->rpc_call_done(task, task->tk_calldata);
if (task->tk_action != NULL) {
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
}
}
}
void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
task->tk_action = rpc_exit_task;
if (RPC_IS_QUEUED(task))
rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
if (ops->rpc_release != NULL)
ops->rpc_release(calldata);
}
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
static void __rpc_execute(struct rpc_task *task)
{
struct rpc_wait_queue *queue;
int task_is_async = RPC_IS_ASYNC(task);
int status = 0;
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
/*
* Execute any pending callback.
*/
if (task->tk_callback) {
void (*save_callback)(struct rpc_task *);
/*
* We set tk_callback to NULL before calling it,
* in case it sets the tk_callback field itself:
*/
save_callback = task->tk_callback;
task->tk_callback = NULL;
save_callback(task);
} else {
/*
* Perform the next FSM step.
* tk_action may be NULL when the task has been killed
* by someone else.
*/
if (task->tk_action == NULL)
break;
task->tk_action(task);
}
/*
* Lockless check for whether task is sleeping or not.
*/
if (!RPC_IS_QUEUED(task))
continue;
/*
* The queue->lock protects against races with
* rpc_make_runnable().
*
* Note that once we clear RPC_TASK_RUNNING on an asynchronous
* rpc_task, rpc_make_runnable() can assign it to a
* different workqueue. We therefore cannot assume that the
* rpc_task pointer can still be safely dereferenced.
*/
queue = task->tk_waitqueue;
spin_lock_bh(&queue->lock);
if (!RPC_IS_QUEUED(task)) {
spin_unlock_bh(&queue->lock);
continue;
}
rpc_clear_running(task);
spin_unlock_bh(&queue->lock);
if (task_is_async)
return;
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
status = out_of_line_wait_on_bit(&task->tk_runstate,
RPC_TASK_QUEUED, rpc_wait_bit_killable,
TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* -ERESTARTSYS. In order to catch any callbacks that
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
}
rpc_set_running(task);
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
}
dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
task->tk_status);
/* Release all resources associated with the task */
rpc_release_task(task);
}
/*
* User-visible entry point to the scheduler.
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
* NOTE: Upon exit of this function the task is guaranteed to be
* released. In particular note that rpc_release_task() will have
* been called, so your task memory may have been freed.
*/
void rpc_execute(struct rpc_task *task)
{
rpc_set_active(task);
rpc_make_runnable(task);
if (!RPC_IS_ASYNC(task))
__rpc_execute(task);
}
static void rpc_async_schedule(struct work_struct *work)
{
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
* @size: requested byte size
*
* To prevent rpciod from hanging, this allocator never sleeps,
* returning NULL if the request cannot be serviced immediately.
* The caller can arrange to sleep in a way that is safe for rpciod.
*
* Most requests are 'small' (under 2KiB) and can be serviced from a
* mempool, ensuring that NFS reads and writes can always proceed,
* and that there is good locality of reference for these buffers.
*
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we avoid using GFP_KERNEL.
*/
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
buf = mempool_alloc(rpc_buffer_mempool, gfp);
else
buf = kmalloc(size, gfp);
if (!buf)
return NULL;
buf->len = size;
dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
task->tk_pid, size, buf);
return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
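/*
 * Editor's note: illustrative sketch, not part of the original file.
 * rpc_malloc()/rpc_free() form the generic buf_alloc/buf_free pair used
 * by socket transports: the caller requests the combined send+receive
 * buffer sized in call_allocate() and must tolerate a NULL return,
 * since this allocator never sleeps.
 */
static int example_alloc_req_buffer(struct rpc_task *task, struct rpc_rqst *req)
{
	req->rq_buffer = rpc_malloc(task, req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer == NULL)
		return -ENOMEM;	/* caller should rpc_delay() and retry */
	return 0;
}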
/**
* rpc_free - free buffer allocated via rpc_malloc
* @buffer: buffer to free
*
*/
void rpc_free(void *buffer)
{
size_t size;
struct rpc_buffer *buf;
if (!buffer)
return;
buf = container_of(buffer, struct rpc_buffer, data);
size = buf->len;
dprintk("RPC: freeing buffer of size %zu at %p\n",
size, buf);
if (size <= RPC_BUFFER_MAXSIZE)
mempool_free(buf, rpc_buffer_mempool);
else
kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
*/
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
atomic_set(&task->tk_count, 1);
task->tk_flags = task_setup_data->flags;
task->tk_ops = task_setup_data->callback_ops;
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
task->tk_workqueue = task_setup_data->workqueue;
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
/* starting timestamp */
task->tk_start = ktime_get();
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
/*
* Create a new task for the specified client.
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
struct rpc_task *task = setup_data->task;
unsigned short flags = 0;
if (task == NULL) {
task = rpc_alloc_task();
if (task == NULL) {
rpc_release_calldata(setup_data->callback_ops,
setup_data->callback_data);
return ERR_PTR(-ENOMEM);
}
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
return task;
}
static void rpc_free_task(struct rpc_task *task)
{
const struct rpc_call_ops *tk_ops = task->tk_ops;
void *calldata = task->tk_calldata;
if (task->tk_flags & RPC_TASK_DYNAMIC) {
dprintk("RPC: %5u freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
}
rpc_release_calldata(tk_ops, calldata);
}
static void rpc_async_release(struct work_struct *work)
{
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}
static void rpc_release_resources_task(struct rpc_task *task)
{
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_msg.rpc_cred) {
put_rpccred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
}
static void rpc_final_put_task(struct rpc_task *task,
struct workqueue_struct *q)
{
if (q != NULL) {
INIT_WORK(&task->u.tk_work, rpc_async_release);
queue_work(q, &task->u.tk_work);
} else
rpc_free_task(task);
}
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
if (atomic_dec_and_test(&task->tk_count)) {
rpc_release_resources_task(task);
rpc_final_put_task(task, q);
}
}
void rpc_put_task(struct rpc_task *task)
{
rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
void rpc_put_task_async(struct rpc_task *task)
{
rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
BUG_ON (RPC_IS_QUEUED(task));
rpc_release_resources_task(task);
/*
* Note: at this point we have been removed from rpc_clnt->cl_tasks,
* so it should be safe to use task->tk_count as a test for whether
* or not any other processes still hold references to our rpc_task.
*/
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
/* Wake up anyone who may be waiting for task completion */
if (!rpc_complete_task(task))
return;
} else {
if (!atomic_dec_and_test(&task->tk_count))
return;
}
rpc_final_put_task(task, task->tk_workqueue);
}
int rpciod_up(void)
{
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}
void rpciod_down(void)
{
module_put(THIS_MODULE);
}
/*
* Start up the rpciod workqueue.
*/
static int rpciod_start(void)
{
struct workqueue_struct *wq;
/*
* Create the rpciod workqueue and publish it in rpciod_workqueue.
*/
dprintk("RPC: creating workqueue rpciod\n");
wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
static void rpciod_stop(void)
{
struct workqueue_struct *wq = NULL;
if (rpciod_workqueue == NULL)
return;
dprintk("RPC: destroying workqueue rpciod\n");
wq = rpciod_workqueue;
rpciod_workqueue = NULL;
destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
mempool_destroy(rpc_task_mempool);
if (rpc_task_slabp)
kmem_cache_destroy(rpc_task_slabp);
if (rpc_buffer_slabp)
kmem_cache_destroy(rpc_buffer_slabp);
rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
/*
* The following is not strictly a mempool initialisation,
* but there is no harm in doing it here
*/
rpc_init_wait_queue(&delay_queue, "delayq");
if (!rpciod_start())
goto err_nomem;
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
}
| /*
* linux/net/sunrpc/sched.c
*
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
*
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sunrpc/clnt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
#endif
/*
* RPC slabs and memory pools
*/
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
struct workqueue_struct *rpciod_workqueue;
/*
* Disable the timer for a given RPC task. Should be called with
* queue->lock held and bottom halves disabled in order to avoid races within
* rpc_run_timer().
*/
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (task->tk_timeout == 0)
return;
dprintk("RPC: %5u disabling timer\n", task->tk_pid);
task->tk_timeout = 0;
list_del(&task->u.tk_wait.timer_list);
if (list_empty(&queue->timer_list.list))
del_timer(&queue->timer_list.timer);
}
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
queue->timer_list.expires = expires;
mod_timer(&queue->timer_list.timer, expires);
}
/*
* Set up a timer for the current task.
*/
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!task->tk_timeout)
return;
dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
task->u.tk_wait.expires = jiffies + task->tk_timeout;
if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
rpc_set_queue_timer(queue, task->u.tk_wait.expires);
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
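/*
 * The per-queue timer is kept armed for the earliest expiry among the
 * tasks on timer_list: __rpc_add_timer() above only rearms it when the
 * new task expires sooner than the current deadline, and
 * __rpc_queue_timer_fn() rescans the list and rearms it for the next
 * pending expiry after waking the timed-out tasks.
 */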
/*
* Add new request to a priority queue.
*/
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
q = &queue->tasks[task->tk_priority];
if (unlikely(task->tk_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
}
/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
* This should avoid many nasty memory deadlocks and hopefully
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
BUG_ON (RPC_IS_QUEUED(task));
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task);
else if (RPC_IS_SWAPPER(task))
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
/*
* Remove request from a priority queue.
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
struct rpc_task *t;
if (!list_empty(&task->u.tk_wait.links)) {
t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
}
/*
* Remove request from queue.
* Note: must be called with spin lock held.
*/
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
queue->priority = priority;
queue->count = 1 << (priority * 2);
}
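/*
 * The count set above is the quota of per-owner batches serviced at a
 * given priority level before __rpc_wake_up_next_priority() rotates to
 * the next list: 1 batch at priority 0, 4 at priority 1, 16 at priority
 * 2 (count = 1 << (priority * 2)), so higher-priority lists are served
 * for longer before the queue moves on.
 */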
static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
rpc_set_waitqueue_owner(queue, 0);
}
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
spin_lock_init(&queue->lock);
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
INIT_LIST_HEAD(&queue->tasks[i]);
queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
queue->qlen = 0;
setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
queue->name = qname;
#endif
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
}
#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
static atomic_t rpc_pid;
task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
static void rpc_set_active(struct rpc_task *task)
{
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
/*
* Mark an RPC call as having completed by clearing the 'active' bit
* and then waking up all tasks that were sleeping.
*/
static int rpc_complete_task(struct rpc_task *task)
{
void *m = &task->tk_runstate;
wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
unsigned long flags;
int ret;
spin_lock_irqsave(&wq->lock, flags);
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
ret = atomic_dec_and_test(&task->tk_count);
if (waitqueue_active(wq))
__wake_up_locked_key(wq, TASK_NORMAL, &k);
spin_unlock_irqrestore(&wq->lock, flags);
return ret;
}
/*
* Allow callers to wait for completion of an RPC call
*
* Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
* to enforce taking of the wq->lock and hence avoid races with
* rpc_complete_task().
*/
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
if (action == NULL)
action = rpc_wait_bit_killable;
return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
rpc_clear_queued(task);
if (rpc_test_and_set_running(task))
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(rpciod_workqueue, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
* Prepare for sleeping on a wait queue.
* By always appending tasks to the list we ensure FIFO behavior.
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
__rpc_add_wait_queue(q, task);
BUG_ON(task->tk_callback != NULL);
task->tk_callback = action;
__rpc_add_timer(q, task);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
/* We shouldn't ever put an inactive task to sleep */
BUG_ON(!RPC_IS_ACTIVATED(task));
/*
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
__rpc_sleep_on(q, task, action);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
/* Has the task been executed yet? If not, we cannot wake it up! */
if (!RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
return;
}
__rpc_remove_wait_queue(queue, task);
rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
__rpc_do_wake_up_task(queue, task);
}
/*
* Tests whether rpc queue is empty
*/
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
int res;
spin_lock_bh(&queue->lock);
res = queue->qlen;
spin_unlock_bh(&queue->lock);
return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);
/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
struct list_head *q;
struct rpc_task *task;
/*
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
}
/*
* Check if we need to switch queues.
*/
if (--queue->count)
goto new_owner;
}
/*
* Service the next queue.
*/
do {
if (q == &queue->tasks[0])
q = &queue->tasks[queue->maxpriority];
else
q = q - 1;
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
rpc_reset_waitqueue_priority(queue);
return NULL;
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
rpc_wake_up_task_queue_locked(queue, task);
return task;
}
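/*
 * In summary, the priority scheduling above proceeds in three stages: up
 * to RPC_BATCH_COUNT consecutive tasks from the current owner are woken
 * first; when that batch is exhausted a new owner is picked from the same
 * priority list until the level's count quota runs out; only then does
 * the scan walk the lists from the current priority downwards (wrapping
 * to maxpriority) until a non-empty one is found.
 */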
/*
* Wake up the next task on the wait queue.
*/
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
struct rpc_task *task = NULL;
dprintk("RPC: wake_up_next(%p \"%s\")\n",
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
if (RPC_IS_PRIORITY(queue))
task = __rpc_wake_up_next_priority(queue);
else {
task_for_first(task, &queue->tasks[0])
rpc_wake_up_task_queue_locked(queue, task);
}
spin_unlock_bh(&queue->lock);
return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
* rpc_wake_up - wake up all rpc_tasks
* @queue: rpc_wait_queue on which the tasks are sleeping
*
* Grabs queue->lock
*/
void rpc_wake_up(struct rpc_wait_queue *queue)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list)
rpc_wake_up_task_queue_locked(queue, task);
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
/**
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
* @queue: rpc_wait_queue on which the tasks are sleeping
* @status: status value to set
*
* Grabs queue->lock
*/
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
task->tk_status = status;
rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
static void __rpc_queue_timer_fn(unsigned long ptr)
{
struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
struct rpc_task *task, *n;
unsigned long expires, now, timeo;
spin_lock(&queue->lock);
expires = now = jiffies;
list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
timeo = task->u.tk_wait.expires;
if (time_after_eq(now, timeo)) {
dprintk("RPC: %5u timeout\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
rpc_wake_up_task_queue_locked(queue, task);
continue;
}
if (expires == now || time_after(expires, timeo))
expires = timeo;
}
if (!list_empty(&queue->timer_list.list))
rpc_set_queue_timer(queue, expires);
spin_unlock(&queue->lock);
}
static void __rpc_atrun(struct rpc_task *task)
{
task->tk_status = 0;
}
/*
* Run a task at a later time
*/
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
void rpc_exit_task(struct rpc_task *task)
{
task->tk_action = NULL;
if (task->tk_ops->rpc_call_done != NULL) {
task->tk_ops->rpc_call_done(task, task->tk_calldata);
if (task->tk_action != NULL) {
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
}
}
}
void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
task->tk_action = rpc_exit_task;
if (RPC_IS_QUEUED(task))
rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
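/*
 * Note that rpc_exit() does not tear the task down directly: it points
 * tk_action at rpc_exit_task and, if the task is asleep, wakes it, so the
 * next pass through the __rpc_execute() state machine runs rpc_call_done
 * and then (unless the callback restarts the call by setting tk_action
 * again) drops out of the FSM loop.
 */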
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
if (ops->rpc_release != NULL)
ops->rpc_release(calldata);
}
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
static void __rpc_execute(struct rpc_task *task)
{
struct rpc_wait_queue *queue;
int task_is_async = RPC_IS_ASYNC(task);
int status = 0;
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
/*
* Execute any pending callback.
*/
if (task->tk_callback) {
void (*save_callback)(struct rpc_task *);
/*
* We set tk_callback to NULL before calling it,
* in case it sets the tk_callback field itself:
*/
save_callback = task->tk_callback;
task->tk_callback = NULL;
save_callback(task);
} else {
/*
* Perform the next FSM step.
* tk_action may be NULL when the task has been killed
* by someone else.
*/
if (task->tk_action == NULL)
break;
task->tk_action(task);
}
/*
* Lockless check for whether task is sleeping or not.
*/
if (!RPC_IS_QUEUED(task))
continue;
/*
* The queue->lock protects against races with
* rpc_make_runnable().
*
* Note that once we clear RPC_TASK_RUNNING on an asynchronous
* rpc_task, rpc_make_runnable() can assign it to a
* different workqueue. We therefore cannot assume that the
* rpc_task pointer can still be safely dereferenced afterwards.
*/
queue = task->tk_waitqueue;
spin_lock_bh(&queue->lock);
if (!RPC_IS_QUEUED(task)) {
spin_unlock_bh(&queue->lock);
continue;
}
rpc_clear_running(task);
spin_unlock_bh(&queue->lock);
if (task_is_async)
return;
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
status = out_of_line_wait_on_bit(&task->tk_runstate,
RPC_TASK_QUEUED, rpc_wait_bit_killable,
TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* -ERESTARTSYS. In order to catch any callbacks that
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
}
rpc_set_running(task);
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
}
dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
task->tk_status);
/* Release all resources associated with the task */
rpc_release_task(task);
}
/*
* User-visible entry point to the scheduler.
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
* NOTE: Upon exit of this function the task is guaranteed to be
* released. In particular note that tk_release() will have
* been called, so your task memory may have been freed.
*/
void rpc_execute(struct rpc_task *task)
{
rpc_set_active(task);
rpc_make_runnable(task);
if (!RPC_IS_ASYNC(task))
__rpc_execute(task);
}
static void rpc_async_schedule(struct work_struct *work)
{
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
* @size: requested byte size
*
* To prevent rpciod from hanging, this allocator never sleeps,
* returning NULL if the request cannot be serviced immediately.
* The caller can arrange to sleep in a way that is safe for rpciod.
*
* Most requests are 'small' (under 2KiB) and can be serviced from a
* mempool, ensuring that NFS reads and writes can always proceed,
* and that there is good locality of reference for these buffers.
*
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we avoid using GFP_KERNEL.
*/
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
buf = mempool_alloc(rpc_buffer_mempool, gfp);
else
buf = kmalloc(size, gfp);
if (!buf)
return NULL;
buf->len = size;
dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
task->tk_pid, size, buf);
return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
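/*
 * The pointer handed back by rpc_malloc() is &buf->data rather than the
 * rpc_buffer itself, which is why rpc_free() below recovers the header
 * with container_of() and uses the stashed buf->len to choose between
 * mempool_free() and kfree(). Requests larger than RPC_BUFFER_MAXSIZE
 * bypass the mempool entirely, in both directions.
 */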
/**
* rpc_free - free buffer allocated via rpc_malloc
* @buffer: buffer to free
*
*/
void rpc_free(void *buffer)
{
size_t size;
struct rpc_buffer *buf;
if (!buffer)
return;
buf = container_of(buffer, struct rpc_buffer, data);
size = buf->len;
dprintk("RPC: freeing buffer of size %zu at %p\n",
size, buf);
if (size <= RPC_BUFFER_MAXSIZE)
mempool_free(buf, rpc_buffer_mempool);
else
kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
*/
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
atomic_set(&task->tk_count, 1);
task->tk_flags = task_setup_data->flags;
task->tk_ops = task_setup_data->callback_ops;
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
task->tk_rebind_retry = 2;
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
task->tk_workqueue = task_setup_data->workqueue;
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
/* starting timestamp */
task->tk_start = ktime_get();
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
/*
* Create a new task for the specified client.
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
struct rpc_task *task = setup_data->task;
unsigned short flags = 0;
if (task == NULL) {
task = rpc_alloc_task();
if (task == NULL) {
rpc_release_calldata(setup_data->callback_ops,
setup_data->callback_data);
return ERR_PTR(-ENOMEM);
}
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
return task;
}
static void rpc_free_task(struct rpc_task *task)
{
const struct rpc_call_ops *tk_ops = task->tk_ops;
void *calldata = task->tk_calldata;
if (task->tk_flags & RPC_TASK_DYNAMIC) {
dprintk("RPC: %5u freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
}
rpc_release_calldata(tk_ops, calldata);
}
static void rpc_async_release(struct work_struct *work)
{
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}
static void rpc_release_resources_task(struct rpc_task *task)
{
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_msg.rpc_cred) {
put_rpccred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
}
static void rpc_final_put_task(struct rpc_task *task,
struct workqueue_struct *q)
{
if (q != NULL) {
INIT_WORK(&task->u.tk_work, rpc_async_release);
queue_work(q, &task->u.tk_work);
} else
rpc_free_task(task);
}
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
if (atomic_dec_and_test(&task->tk_count)) {
rpc_release_resources_task(task);
rpc_final_put_task(task, q);
}
}
void rpc_put_task(struct rpc_task *task)
{
rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
void rpc_put_task_async(struct rpc_task *task)
{
rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
BUG_ON (RPC_IS_QUEUED(task));
rpc_release_resources_task(task);
/*
* Note: at this point we have been removed from rpc_clnt->cl_tasks,
* so it should be safe to use task->tk_count as a test for whether
* or not any other processes still hold references to our rpc_task.
*/
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
/* Wake up anyone who may be waiting for task completion */
if (!rpc_complete_task(task))
return;
} else {
if (!atomic_dec_and_test(&task->tk_count))
return;
}
rpc_final_put_task(task, task->tk_workqueue);
}
int rpciod_up(void)
{
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}
void rpciod_down(void)
{
module_put(THIS_MODULE);
}
/*
* Start up the rpciod workqueue.
*/
static int rpciod_start(void)
{
struct workqueue_struct *wq;
/*
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
static void rpciod_stop(void)
{
struct workqueue_struct *wq = NULL;
if (rpciod_workqueue == NULL)
return;
dprintk("RPC: destroying workqueue rpciod\n");
wq = rpciod_workqueue;
rpciod_workqueue = NULL;
destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
mempool_destroy(rpc_task_mempool);
if (rpc_task_slabp)
kmem_cache_destroy(rpc_task_slabp);
if (rpc_buffer_slabp)
kmem_cache_destroy(rpc_buffer_slabp);
rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
/*
* The following is not strictly a mempool initialisation,
* but there is no harm in doing it here
*/
rpc_init_wait_queue(&delay_queue, "delayq");
if (!rpciod_start())
goto err_nomem;
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
}
|
190,470,115,965,016 | /*
* Hardware performance events for the Alpha.
*
* We implement HW counts on the EV67 and subsequent CPUs only.
*
* (C) 2010 Michael J. Cree
*
* Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
* ARM code, which are copyright by their respective authors.
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>
/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
int enabled;
/* Number of events scheduled; also the number of entries valid in the arrays below. */
int n_events;
/* Number of events added since the last hw_perf_disable(). */
int n_added;
/* Events currently scheduled. */
struct perf_event *event[MAX_HWEVENTS];
/* Event type of each scheduled event. */
unsigned long evtype[MAX_HWEVENTS];
/* Current index of each scheduled event; if not yet determined
* contains PMC_NO_INDEX.
*/
int current_idx[MAX_HWEVENTS];
/* The active PMCs' config for easy use with wrperfmon(). */
unsigned long config;
/* The active counters' indices for easy use with wrperfmon(). */
unsigned long idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
* A structure to hold the description of the PMCs available on a particular
* type of Alpha CPU.
*/
struct alpha_pmu_t {
/* Mapping of the perf system hw event types to indigenous event types */
const int *event_map;
/* The number of entries in the event_map */
int max_events;
/* The number of PMCs on this Alpha */
int num_pmcs;
/*
* All PMC counters reside in the IBOX register PCTR. This is the
* LSB of the counter.
*/
int pmc_count_shift[MAX_HWEVENTS];
/*
* The mask that isolates the PMC bits when the LSB of the counter
* is shifted to bit 0.
*/
unsigned long pmc_count_mask[MAX_HWEVENTS];
/* The maximum period the PMC can count. */
unsigned long pmc_max_period[MAX_HWEVENTS];
/*
* The maximum value that may be written to the counter due to
* hardware restrictions is pmc_max_period - pmc_left.
*/
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
};
/*
* The Alpha CPU PMU description currently in operation. This is set during
* the boot process to the specific CPU of the machine.
*/
static const struct alpha_pmu_t *alpha_pmu;
#define HW_OP_UNSUPPORTED -1
/*
* The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
* follow. Since they are identical we refer to them collectively as the
* EV67 henceforth.
*/
/*
* EV67 PMC event types
*
* There is no one-to-one mapping of the possible hw event types to the
* actual codes that are used to program the PMCs hence we introduce our
* own hw event type identifiers.
*/
enum ev67_pmc_event_type {
EV67_CYCLES = 1,
EV67_INSTRUCTIONS,
EV67_BCACHEMISS,
EV67_MBOXREPLAY,
EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
};
struct ev67_mapping_t {
int config;
int idx;
};
/*
* The mapping used for one event only - these must be in the same order as
* the enum ev67_pmc_event_type definition.
*/
static const struct ev67_mapping_t ev67_mapping[] = {
{EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
{EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
{EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
};
/*
* Check that a group of events can be simultaneously scheduled on to the
* EV67 PMU. Also allocate counter indices and config.
*/
static int ev67_check_constraints(struct perf_event **event,
unsigned long *evtype, int n_ev)
{
int idx0;
unsigned long config;
idx0 = ev67_mapping[evtype[0]-1].idx;
config = ev67_mapping[evtype[0]-1].config;
if (n_ev == 1)
goto success;
BUG_ON(n_ev != 2);
if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
/* MBOX replay traps must be on PMC 1 */
idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
/* Only cycles can accompany MBOX replay traps */
if (evtype[idx0] == EV67_CYCLES) {
config = EV67_PCTR_CYCLES_MBOX;
goto success;
}
}
if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
/* Bcache misses must be on PMC 1 */
idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
/* Only instructions can accompany Bcache misses */
if (evtype[idx0] == EV67_INSTRUCTIONS) {
config = EV67_PCTR_INSTR_BCACHEMISS;
goto success;
}
}
if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
/* Instructions must be on PMC 0 */
idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
/* By this point only cycles can accompany instructions */
if (evtype[idx0^1] == EV67_CYCLES) {
config = EV67_PCTR_INSTR_CYCLES;
goto success;
}
}
/* Otherwise, darn it, there is a conflict. */
return -1;
success:
event[0]->hw.idx = idx0;
event[0]->hw.config_base = config;
if (n_ev == 2) {
event[1]->hw.idx = idx0 ^ 1;
event[1]->hw.config_base = config;
}
return 0;
}
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
.num_pmcs = 2,
.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
.check_constraints = ev67_check_constraints
};
/*
* Helper routines to ensure that we read/write only the correct PMC bits
* when calling the wrperfmon PALcall.
*/
static inline void alpha_write_pmc(int idx, unsigned long val)
{
val &= alpha_pmu->pmc_count_mask[idx];
val <<= alpha_pmu->pmc_count_shift[idx];
val |= (1<<idx);
wrperfmon(PERFMON_CMD_WRITE, val);
}
static inline unsigned long alpha_read_pmc(int idx)
{
unsigned long val;
val = wrperfmon(PERFMON_CMD_READ, 0);
val >>= alpha_pmu->pmc_count_shift[idx];
val &= alpha_pmu->pmc_count_mask[idx];
return val;
}
/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Hardware restrictions require that the counters must not be
* written with values that are too close to the maximum period.
*/
if (unlikely(left < alpha_pmu->pmc_left[idx]))
left = alpha_pmu->pmc_left[idx];
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
local64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
perf_event_update_userpage(event);
return ret;
}
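/*
 * Worked example for the negative programming above: the EV67 PMCs count
 * up and pmc_max_period is (1UL<<20) - 1, so with left = 1000 the value
 * (-1000), masked to the 20-bit count field, is 0xffc18; the counter then
 * counts through 1000 events, wraps past the maximum and raises the
 * overflow PMI.  prev_count keeps the unmasked -left so that
 * alpha_perf_event_update() can reconstruct the delta.
 */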
/*
* Calculates the count (the 'delta') since the last time the PMC was read.
*
* As the PMCs' full period can easily be exceeded within the perf system
* sampling period we cannot use any high order bits as a guard bit in the
* PMCs to detect overflow as is done by other architectures. The code here
* calculates the delta on the basis that there is no overflow when ovf is
* zero. The value passed via ovf by the interrupt handler corrects for
* overflow.
*
* This can be racy on rare occasions -- a call to this routine can occur
* with an overflowed counter just before the PMI service routine is called.
* The check for delta negative hopefully always rectifies this situation.
*/
static unsigned long alpha_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx, long ovf)
{
long prev_raw_count, new_raw_count;
long delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
*/
if (unlikely(delta < 0)) {
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
/*
* Collect all HW events into the array event[].
*/
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *event[], unsigned long *evtype,
int *current_idx)
{
struct perf_event *pe;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
event[n] = group;
evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
list_for_each_entry(pe, &group->sibling_list, group_entry) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
event[n] = pe;
evtype[n] = pe->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
}
return n;
}
/*
* Check that a group of events can be simultaneously scheduled on to the PMU.
*/
static int alpha_check_constraints(struct perf_event **events,
unsigned long *evtypes, int n_ev)
{
/* No HW events is possible from hw_perf_group_sched_in(). */
if (n_ev == 0)
return 0;
if (n_ev > alpha_pmu->num_pmcs)
return -1;
return alpha_pmu->check_constraints(events, evtypes, n_ev);
}
/*
* If new events have been scheduled then update cpuc with the new
* configuration. This may involve shifting cycle counts from one PMC to
* another.
*/
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
int j;
if (cpuc->n_added == 0)
return;
/* Find counters that are moving to another PMC and update */
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
if (cpuc->current_idx[j] != PMC_NO_INDEX &&
cpuc->current_idx[j] != pe->hw.idx) {
alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
cpuc->current_idx[j] = PMC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
cpuc->idx_mask = 0;
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
if (cpuc->current_idx[j] == PMC_NO_INDEX) {
alpha_perf_event_set_period(pe, hwc, idx);
cpuc->current_idx[j] = idx;
}
if (!(hwc->state & PERF_HES_STOPPED))
cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
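/*
 * An event whose PMC assignment changes is handled in two steps above:
 * its count so far is folded into event->count while it is still on the
 * old PMC (via alpha_perf_event_update()), and it is then reprogrammed
 * with a fresh period on whatever PMC the constraint checker assigned.
 */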
/* Schedule perf HW event on to PMU.
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
* disable, however this can lead to an overflowed counter with the
* PMI disabled on rare occasions. The alpha_perf_event_update()
* routine should detect this situation by noting a negative delta,
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
/* Insert event on to PMU and if successful modify ret to valid return */
n0 = cpuc->n_events;
if (n0 < alpha_pmu->num_pmcs) {
cpuc->event[n0] = event;
cpuc->evtype[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PMC_NO_INDEX;
if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
}
}
hwc->state = PERF_HES_UPTODATE;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_STOPPED;
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
return ret;
}
/* Disable performance monitoring unit
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
int idx = cpuc->current_idx[j];
/* Shift remaining entries down into the existing
* slot.
*/
while (++j < cpuc->n_events) {
cpuc->event[j - 1] = cpuc->event[j];
cpuc->evtype[j - 1] = cpuc->evtype[j];
cpuc->current_idx[j - 1] =
cpuc->current_idx[j];
}
/* Absorb the final count and turn off the event. */
alpha_perf_event_update(event, hwc, idx, 0);
perf_event_update_userpage(event);
cpuc->idx_mask &= ~(1UL<<idx);
cpuc->n_events--;
break;
}
}
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
}
static void alpha_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
alpha_perf_event_update(event, hwc, hwc->idx, 0);
}
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
alpha_perf_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_UPTODATE;
}
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
alpha_perf_event_set_period(event, hwc, hwc->idx);
}
hwc->state = 0;
cpuc->idx_mask |= 1UL<<hwc->idx;
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
/*
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
* EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
{
struct percpu_struct *cpu;
unsigned long cputype;
/* Get cpu type from HW */
cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cputype = cpu->type & 0xffffffff;
/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Nothing to be done! */
return;
}
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
struct perf_event *evts[MAX_HWEVENTS];
unsigned long evtypes[MAX_HWEVENTS];
int idx_rubbish_bin[MAX_HWEVENTS];
int ev;
int n;
/* We only support a limited range of HARDWARE event types; anything
* else is programmable only via a RAW event type.
*/
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= alpha_pmu->max_events)
return -EINVAL;
ev = alpha_pmu->event_map[attr->config];
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
ev = attr->config & 0xff;
} else {
return -EOPNOTSUPP;
}
if (ev < 0) {
return ev;
}
/* The EV67 does not support mode exclusion */
if (attr->exclude_kernel || attr->exclude_user
|| attr->exclude_hv || attr->exclude_idle) {
return -EPERM;
}
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
* it is only then we will know what HW events are actually
* scheduled on to the PMU. At that point the code to programme the
* PMU is put into config_base and the PMC to use is placed into
* idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
* it is yet to be determined.
*/
hwc->event_base = ev;
/* Collect events in a group together suitable for calling
* alpha_check_constraints() to verify that the group as a whole can
* be scheduled on to the PMU.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
alpha_pmu->num_pmcs - 1,
evts, evtypes, idx_rubbish_bin);
if (n < 0)
return -EINVAL;
}
evtypes[n] = hwc->event_base;
evts[n] = event;
if (alpha_check_constraints(evts, evtypes, n + 1))
return -EINVAL;
/* Indicate that PMU config and idx are yet to be determined. */
hwc->config_base = 0;
hwc->idx = PMC_NO_INDEX;
event->destroy = hw_perf_event_destroy;
/*
* Most architectures reserve the PMU for their use at this point.
* As there is no existing mechanism to arbitrate usage and there
* appears to be no other user of the Alpha PMU, we simply assume
* that we can use it, hence a NO-OP here.
*
* Maybe an alpha_reserve_pmu() routine should be implemented but is
* anything else ever going to use it?
*/
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Main entry point to initialise a HW performance event.
*/
static int alpha_pmu_event_init(struct perf_event *event)
{
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!alpha_pmu)
return -ENODEV;
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
return err;
}
/*
* Main entry point - enable HW performance counters.
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
if (cpuc->n_events > 0) {
/* Update cpuc with information from any new scheduled events. */
maybe_change_configuration(cpuc);
/* Start counting the desired events. */
wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}
}
/*
* Main entry point - disable HW performance counters.
*/
static void alpha_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
static struct pmu pmu = {
.pmu_enable = alpha_pmu_enable,
.pmu_disable = alpha_pmu_disable,
.event_init = alpha_pmu_event_init,
.add = alpha_pmu_add,
.del = alpha_pmu_del,
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
};
/*
* Main entry point - don't know when this is called but it
* obviously dumps debug info.
*/
void perf_event_print_debug(void)
{
unsigned long flags;
unsigned long pcr;
int pcr0, pcr1;
int cpu;
if (!supported_cpu())
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = wrperfmon(PERFMON_CMD_READ, 0);
pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
local_irq_restore(flags);
}
/*
* Performance Monitoring Interrupt Service Routine called when a PMC
* overflows. The PMC that overflowed is passed in la_ptr.
*/
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, 1, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
/*
* Init call to initialise performance events at kernel startup.
*/
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_cpu()) {
pr_cont("No support for your CPU.\n");
return 0;
}
pr_cont("Supported CPU type!\n");
/* Override performance counter IRQ vector */
perf_irq = alpha_perf_event_irq_handler;
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
| /*
* Hardware performance events for the Alpha.
*
* We implement HW counts on the EV67 and subsequent CPUs only.
*
* (C) 2010 Michael J. Cree
*
* Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
* ARM code, which are copyright by their respective authors.
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>
/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
int enabled;
/* Number of events scheduled; also the number of entries valid in the arrays below. */
int n_events;
/* Number of events added since the last hw_perf_disable(). */
int n_added;
/* Events currently scheduled. */
struct perf_event *event[MAX_HWEVENTS];
/* Event type of each scheduled event. */
unsigned long evtype[MAX_HWEVENTS];
/* Current index of each scheduled event; if not yet determined
* contains PMC_NO_INDEX.
*/
int current_idx[MAX_HWEVENTS];
/* The active PMCs' config for easy use with wrperfmon(). */
unsigned long config;
/* The active counters' indices for easy use with wrperfmon(). */
unsigned long idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
* A structure to hold the description of the PMCs available on a particular
* type of Alpha CPU.
*/
struct alpha_pmu_t {
/* Mapping of the perf system hw event types to indigenous event types */
const int *event_map;
/* The number of entries in the event_map */
int max_events;
/* The number of PMCs on this Alpha */
int num_pmcs;
/*
* All PMC counters reside in the IBOX register PCTR. This is the
* LSB of the counter.
*/
int pmc_count_shift[MAX_HWEVENTS];
/*
* The mask that isolates the PMC bits when the LSB of the counter
* is shifted to bit 0.
*/
unsigned long pmc_count_mask[MAX_HWEVENTS];
/* The maximum period the PMC can count. */
unsigned long pmc_max_period[MAX_HWEVENTS];
/*
* The maximum value that may be written to the counter due to
* hardware restrictions is pmc_max_period - pmc_left.
*/
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
};
/*
* The Alpha CPU PMU description currently in operation. This is set during
* the boot process to the specific CPU of the machine.
*/
static const struct alpha_pmu_t *alpha_pmu;
#define HW_OP_UNSUPPORTED -1
/*
* The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
* follow. Since they are identical we refer to them collectively as the
* EV67 henceforth.
*/
/*
* EV67 PMC event types
*
* There is no one-to-one mapping of the possible hw event types to the
* actual codes that are used to program the PMCs hence we introduce our
* own hw event type identifiers.
*/
enum ev67_pmc_event_type {
EV67_CYCLES = 1,
EV67_INSTRUCTIONS,
EV67_BCACHEMISS,
EV67_MBOXREPLAY,
EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
};
struct ev67_mapping_t {
int config;
int idx;
};
/*
* The mapping used for one event only - these must be in the same order as
* the enum ev67_pmc_event_type definition.
*/
static const struct ev67_mapping_t ev67_mapping[] = {
{EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
{EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
{EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
};
/*
* Check that a group of events can be simultaneously scheduled on to the
* EV67 PMU. Also allocate counter indices and config.
*/
static int ev67_check_constraints(struct perf_event **event,
unsigned long *evtype, int n_ev)
{
int idx0;
unsigned long config;
idx0 = ev67_mapping[evtype[0]-1].idx;
config = ev67_mapping[evtype[0]-1].config;
if (n_ev == 1)
goto success;
BUG_ON(n_ev != 2);
if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
/* MBOX replay traps must be on PMC 1 */
idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
/* Only cycles can accompany MBOX replay traps */
if (evtype[idx0] == EV67_CYCLES) {
config = EV67_PCTR_CYCLES_MBOX;
goto success;
}
}
if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
/* Bcache misses must be on PMC 1 */
idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
/* Only instructions can accompany Bcache misses */
if (evtype[idx0] == EV67_INSTRUCTIONS) {
config = EV67_PCTR_INSTR_BCACHEMISS;
goto success;
}
}
if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
/* Instructions must be on PMC 0 */
idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
/* By this point only cycles can accompany instructions */
if (evtype[idx0^1] == EV67_CYCLES) {
config = EV67_PCTR_INSTR_CYCLES;
goto success;
}
}
/* Otherwise, darn it, there is a conflict. */
return -1;
success:
event[0]->hw.idx = idx0;
event[0]->hw.config_base = config;
if (n_ev == 2) {
event[1]->hw.idx = idx0 ^ 1;
event[1]->hw.config_base = config;
}
return 0;
}
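/*
 * Taken together, the constraints above admit only the following pairs
 * (beyond any single event on its own): instructions + cycles,
 * instructions + Bcache misses, and cycles + MBOX replay traps, with
 * instructions pinned to PMC 0 and Bcache misses / MBOX replays pinned
 * to PMC 1.  Any other combination falls through to the conflict return.
 */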
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
.num_pmcs = 2,
.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
.check_constraints = ev67_check_constraints
};
/*
* Helper routines to ensure that we read/write only the correct PMC bits
* when calling the wrperfmon PALcall.
*/
static inline void alpha_write_pmc(int idx, unsigned long val)
{
val &= alpha_pmu->pmc_count_mask[idx];
val <<= alpha_pmu->pmc_count_shift[idx];
val |= (1<<idx);
wrperfmon(PERFMON_CMD_WRITE, val);
}
static inline unsigned long alpha_read_pmc(int idx)
{
unsigned long val;
val = wrperfmon(PERFMON_CMD_READ, 0);
val >>= alpha_pmu->pmc_count_shift[idx];
val &= alpha_pmu->pmc_count_mask[idx];
return val;
}
/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Hardware restrictions require that the counters must not be
* written with values that are too close to the maximum period.
*/
if (unlikely(left < alpha_pmu->pmc_left[idx]))
left = alpha_pmu->pmc_left[idx];
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
local64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
perf_event_update_userpage(event);
return ret;
}
/*
* Calculates the count (the 'delta') since the last time the PMC was read.
*
* As the PMCs' full period can easily be exceeded within the perf system
* sampling period we cannot use any high order bits as a guard bit in the
* PMCs to detect overflow as is done by other architectures. The code here
* calculates the delta on the basis that there is no overflow when ovf is
* zero. The value passed via ovf by the interrupt handler corrects for
* overflow.
*
* This can be racy on rare occasions -- a call to this routine can occur
* with an overflowed counter just before the PMI service routine is called.
* The check for delta negative hopefully always rectifies this situation.
*/
static unsigned long alpha_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx, long ovf)
{
long prev_raw_count, new_raw_count;
long delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
*/
if (unlikely(delta < 0)) {
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
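/*
 * Example of the wrap correction above: if the masked prev_count was
 * 0xffc18 and the PMC has already wrapped to a small value such as
 * 0x00010 before the PMI is serviced, the raw delta is negative; adding
 * pmc_max_period + 1 (0x100000 here) yields the true count of 0x3f8
 * (1016) events since the counter was last programmed.
 */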
/*
* Collect all HW events into the array event[].
*/
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *event[], unsigned long *evtype,
int *current_idx)
{
struct perf_event *pe;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
event[n] = group;
evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
list_for_each_entry(pe, &group->sibling_list, group_entry) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
event[n] = pe;
evtype[n] = pe->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
}
return n;
}
/*
* Check that a group of events can be simultaneously scheduled on to the PMU.
*/
static int alpha_check_constraints(struct perf_event **events,
unsigned long *evtypes, int n_ev)
{
/* No HW events is possible from hw_perf_group_sched_in(). */
if (n_ev == 0)
return 0;
if (n_ev > alpha_pmu->num_pmcs)
return -1;
return alpha_pmu->check_constraints(events, evtypes, n_ev);
}
/*
* If new events have been scheduled then update cpuc with the new
* configuration. This may involve shifting cycle counts from one PMC to
* another.
*/
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
int j;
if (cpuc->n_added == 0)
return;
/* Find counters that are moving to another PMC and update */
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
if (cpuc->current_idx[j] != PMC_NO_INDEX &&
cpuc->current_idx[j] != pe->hw.idx) {
alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
cpuc->current_idx[j] = PMC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
cpuc->idx_mask = 0;
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
if (cpuc->current_idx[j] == PMC_NO_INDEX) {
alpha_perf_event_set_period(pe, hwc, idx);
cpuc->current_idx[j] = idx;
}
if (!(hwc->state & PERF_HES_STOPPED))
cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
/* Schedule perf HW event on to PMU.
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
* disable, however this can lead to an overflowed counter with the
* PMI disabled on rare occasions. The alpha_perf_event_update()
* routine should detect this situation by noting a negative delta,
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
/* Insert event on to PMU and if successful modify ret to valid return */
n0 = cpuc->n_events;
if (n0 < alpha_pmu->num_pmcs) {
cpuc->event[n0] = event;
cpuc->evtype[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PMC_NO_INDEX;
if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
}
}
hwc->state = PERF_HES_UPTODATE;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_STOPPED;
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
return ret;
}
/* Disable performance monitoring unit
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
int idx = cpuc->current_idx[j];
/* Shift remaining entries down into the existing
* slot.
*/
while (++j < cpuc->n_events) {
cpuc->event[j - 1] = cpuc->event[j];
cpuc->evtype[j - 1] = cpuc->evtype[j];
cpuc->current_idx[j - 1] =
cpuc->current_idx[j];
}
/* Absorb the final count and turn off the event. */
alpha_perf_event_update(event, hwc, idx, 0);
perf_event_update_userpage(event);
cpuc->idx_mask &= ~(1UL<<idx);
cpuc->n_events--;
break;
}
}
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
}
static void alpha_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
alpha_perf_event_update(event, hwc, hwc->idx, 0);
}
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
alpha_perf_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_UPTODATE;
}
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
alpha_perf_event_set_period(event, hwc, hwc->idx);
}
hwc->state = 0;
cpuc->idx_mask |= 1UL<<hwc->idx;
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
/*
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
* EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
{
struct percpu_struct *cpu;
unsigned long cputype;
/* Get cpu type from HW */
cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cputype = cpu->type & 0xffffffff;
/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Nothing to be done! */
return;
}
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
struct perf_event *evts[MAX_HWEVENTS];
unsigned long evtypes[MAX_HWEVENTS];
int idx_rubbish_bin[MAX_HWEVENTS];
int ev;
int n;
/* We only support a limited range of HARDWARE event types; anything
 * else is only programmable via a RAW event type.
*/
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= alpha_pmu->max_events)
return -EINVAL;
ev = alpha_pmu->event_map[attr->config];
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
ev = attr->config & 0xff;
} else {
return -EOPNOTSUPP;
}
if (ev < 0) {
return ev;
}
/* The EV67 does not support mode exclusion */
if (attr->exclude_kernel || attr->exclude_user
|| attr->exclude_hv || attr->exclude_idle) {
return -EPERM;
}
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
* it is only then we will know what HW events are actually
* scheduled on to the PMU. At that point the code to programme the
* PMU is put into config_base and the PMC to use is placed into
* idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
* it is yet to be determined.
*/
hwc->event_base = ev;
/* Collect events in a group together suitable for calling
* alpha_check_constraints() to verify that the group as a whole can
* be scheduled on to the PMU.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
alpha_pmu->num_pmcs - 1,
evts, evtypes, idx_rubbish_bin);
if (n < 0)
return -EINVAL;
}
evtypes[n] = hwc->event_base;
evts[n] = event;
if (alpha_check_constraints(evts, evtypes, n + 1))
return -EINVAL;
/* Indicate that PMU config and idx are yet to be determined. */
hwc->config_base = 0;
hwc->idx = PMC_NO_INDEX;
event->destroy = hw_perf_event_destroy;
/*
* Most architectures reserve the PMU for their use at this point.
* As there is no existing mechanism to arbitrate usage and there
* appears to be no other user of the Alpha PMU we just assume
* that we can just use it, hence a NO-OP here.
*
* Maybe an alpha_reserve_pmu() routine should be implemented but is
* anything else ever going to use it?
*/
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Main entry point to initialise a HW performance event.
*/
static int alpha_pmu_event_init(struct perf_event *event)
{
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!alpha_pmu)
return -ENODEV;
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
return err;
}
/*
* Main entry point - enable HW performance counters.
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
if (cpuc->n_events > 0) {
/* Update cpuc with information from any new scheduled events. */
maybe_change_configuration(cpuc);
/* Start counting the desired events. */
wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}
}
/*
* Main entry point - disable HW performance counters.
*/
static void alpha_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
static struct pmu pmu = {
.pmu_enable = alpha_pmu_enable,
.pmu_disable = alpha_pmu_disable,
.event_init = alpha_pmu_event_init,
.add = alpha_pmu_add,
.del = alpha_pmu_del,
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
};
/*
* Main entry point - dump the current PMC values for debugging.
*/
void perf_event_print_debug(void)
{
unsigned long flags;
unsigned long pcr;
int pcr0, pcr1;
int cpu;
if (!supported_cpu())
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = wrperfmon(PERFMON_CMD_READ, 0);
pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
local_irq_restore(flags);
}
/*
* Performance Monitoring Interrupt Service Routine called when a PMC
* overflows. The PMC that overflowed is passed in la_ptr.
*/
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
/*
* Init call to initialise performance events at kernel startup.
*/
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_cpu()) {
pr_cont("No support for your CPU.\n");
return 0;
}
pr_cont("Supported CPU type!\n");
/* Override performance counter IRQ vector */
perf_irq = alpha_perf_event_irq_handler;
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
|
104,953,433,330,237 | /*
* ARMv6 Performance counter handling code.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
*
* ARMv6 has 2 configurable performance counters and a single cycle counter.
* They all share a single reset bit but can be written to zero so we can use
* that for a reset.
*
* The counters can't be individually enabled or disabled so when we remove
* one event and replace it with another we could get spurious counts from the
* wrong event. However, we can take advantage of the fact that the
* performance counters can export events to the event bus, and the event bus
* itself can be monitored. This requires that we *don't* export the events to
* the event bus. The procedure for disabling a configurable counter is:
* - change the counter to count the ETMEXTOUT[0] signal (0x20). This
* effectively stops the counter from counting.
* - disable the counter's interrupt generation (each counter has its
* own interrupt enable bit).
* Once stopped, the counter value can be written as 0 to reset.
*
* To enable a counter:
* - enable the counter's interrupt generation.
* - set the new event type.
*
* Note: the dedicated cycle counter only counts cycles and can't be
* enabled/disabled independently of the others. When we want to disable the
* cycle counter, we have to just disable the interrupt reporting and start
* ignoring that counter. When re-enabling, we have to reset the value and
* enable the interrupt.
*/
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1,
ARMV6_PERFCTR_DDEP_STALL = 0x2,
ARMV6_PERFCTR_ITLB_MISS = 0x3,
ARMV6_PERFCTR_DTLB_MISS = 0x4,
ARMV6_PERFCTR_BR_EXEC = 0x5,
ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
ARMV6_PERFCTR_INSTR_EXEC = 0x7,
ARMV6_PERFCTR_DCACHE_HIT = 0x9,
ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
ARMV6_PERFCTR_DCACHE_MISS = 0xB,
ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
ARMV6_PERFCTR_NOP = 0x20,
};
enum armv6_counters {
ARMV6_CYCLE_COUNTER = 1,
ARMV6_COUNTER0,
ARMV6_COUNTER1,
};
/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
enum armv6mpcore_perf_types {
ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};
/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
static inline unsigned long
armv6_pmcr_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
return val;
}
static inline void
armv6_pmcr_write(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}
#define ARMV6_PMCR_ENABLE (1 << 0)
#define ARMV6_PMCR_CTR01_RESET (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
#define ARMV6_PMCR_OVERFLOWED_MASK \
(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
ARMV6_PMCR_CCOUNT_OVERFLOW)
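/*
 * Illustrative summary of the PMCR layout implied by the definitions above
 * (not from the original source): bit 0 enable, bits 1-2 counter/CCNT reset,
 * bit 3 CCNT divider, bits 4-6 interrupt enables, bits 8-10 overflow flags,
 * bits 12-19 event select for counter 1 and bits 20-27 for counter 0.
 */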
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}
static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
enum armv6_counters counter)
{
int ret = 0;
if (ARMV6_CYCLE_COUNTER == counter)
ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
else if (ARMV6_COUNTER0 == counter)
ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
else if (ARMV6_COUNTER1 == counter)
ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
unsigned long value = 0;
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return value;
}
static inline void
armv6pmu_write_counter(int counter,
u32 value)
{
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
evt = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_EVT_COUNT0_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_EVT_COUNT1_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
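/*
 * Illustrative example (not from the original source): enabling COUNTER0
 * to count ARMV6_PERFCTR_INSTR_EXEC (0x7) clears the old event field in
 * bits [27:20] and rewrites it as 0x07, together with
 * ARMV6_PMCR_COUNT0_IEN so that overflow interrupts are delivered.
 */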
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
if (!armv6_pmcr_has_overflowed(pmcr))
return IRQ_NONE;
regs = get_irq_regs();
/*
* The interrupts are cleared by writing the overflow flags back to
* the control register. All of the other bits don't have any effect
* if they are rewritten, so write the whole value back.
*/
armv6_pmcr_write(pmcr);
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
/* Always place a cycle counter into the cycle counter. */
if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV6_CYCLE_COUNTER;
} else {
/*
* For anything other than a cycle counter, try and use
* counter0 and counter1.
*/
if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
return ARMV6_COUNTER1;
if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
return ARMV6_COUNTER0;
/* The counters are all in use. */
return -EAGAIN;
}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
evt = 0;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the number
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, flags, evt = 0;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
.id = ARM_PERF_PMU_ID_V6,
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6_perf_cache_map,
.event_map = &armv6_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
/*
* ARMv6mpcore is almost identical to single core ARMv6 with the exception
* that some of the events have different enumerations and that there is no
* *hack* to stop the programmable counters. To stop the counters we simply
* disable the interrupt reporting and update the event. When unthrottling we
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
.id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6mpcore_perf_cache_map,
.event_map = &armv6mpcore_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
| /*
* ARMv6 Performance counter handling code.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
*
* ARMv6 has 2 configurable performance counters and a single cycle counter.
* They all share a single reset bit but can be written to zero so we can use
* that for a reset.
*
* The counters can't be individually enabled or disabled so when we remove
* one event and replace it with another we could get spurious counts from the
* wrong event. However, we can take advantage of the fact that the
* performance counters can export events to the event bus, and the event bus
* itself can be monitored. This requires that we *don't* export the events to
* the event bus. The procedure for disabling a configurable counter is:
* - change the counter to count the ETMEXTOUT[0] signal (0x20). This
* effectively stops the counter from counting.
* - disable the counter's interrupt generation (each counter has its
* own interrupt enable bit).
* Once stopped, the counter value can be written as 0 to reset.
*
* To enable a counter:
* - enable the counter's interrupt generation.
* - set the new event type.
*
* Note: the dedicated cycle counter only counts cycles and can't be
* enabled/disabled independently of the others. When we want to disable the
* cycle counter, we have to just disable the interrupt reporting and start
* ignoring that counter. When re-enabling, we have to reset the value and
* enable the interrupt.
*/
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1,
ARMV6_PERFCTR_DDEP_STALL = 0x2,
ARMV6_PERFCTR_ITLB_MISS = 0x3,
ARMV6_PERFCTR_DTLB_MISS = 0x4,
ARMV6_PERFCTR_BR_EXEC = 0x5,
ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
ARMV6_PERFCTR_INSTR_EXEC = 0x7,
ARMV6_PERFCTR_DCACHE_HIT = 0x9,
ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
ARMV6_PERFCTR_DCACHE_MISS = 0xB,
ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
ARMV6_PERFCTR_NOP = 0x20,
};
enum armv6_counters {
ARMV6_CYCLE_COUNTER = 1,
ARMV6_COUNTER0,
ARMV6_COUNTER1,
};
/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
enum armv6mpcore_perf_types {
ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};
/*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
static inline unsigned long
armv6_pmcr_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
return val;
}
static inline void
armv6_pmcr_write(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}
#define ARMV6_PMCR_ENABLE (1 << 0)
#define ARMV6_PMCR_CTR01_RESET (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
#define ARMV6_PMCR_OVERFLOWED_MASK \
(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
ARMV6_PMCR_CCOUNT_OVERFLOW)
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}
static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
enum armv6_counters counter)
{
int ret = 0;
if (ARMV6_CYCLE_COUNTER == counter)
ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
else if (ARMV6_COUNTER0 == counter)
ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
else if (ARMV6_COUNTER1 == counter)
ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
unsigned long value = 0;
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return value;
}
static inline void
armv6pmu_write_counter(int counter,
u32 value)
{
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
evt = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_EVT_COUNT0_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_EVT_COUNT1_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
if (!armv6_pmcr_has_overflowed(pmcr))
return IRQ_NONE;
regs = get_irq_regs();
/*
* The interrupts are cleared by writing the overflow flags back to
* the control register. All of the other bits don't have any effect
* if they are rewritten, so write the whole value back.
*/
armv6_pmcr_write(pmcr);
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
/* Always place a cycle counter into the cycle counter. */
if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV6_CYCLE_COUNTER;
} else {
/*
* For anything other than a cycle counter, try and use
* counter0 and counter1.
*/
if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
return ARMV6_COUNTER1;
if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
return ARMV6_COUNTER0;
/* The counters are all in use. */
return -EAGAIN;
}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
evt = 0;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the number
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, flags, evt = 0;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
.id = ARM_PERF_PMU_ID_V6,
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6_perf_cache_map,
.event_map = &armv6_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
/*
* ARMv6mpcore is almost identical to single core ARMv6 with the exception
* that some of the events have different enumerations and that there is no
* *hack* to stop the programmable counters. To stop the counters we simply
* disable the interrupt reporting and update the event. When unthrottling we
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
.id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6mpcore_perf_cache_map,
.event_map = &armv6mpcore_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
|
238,487,548,694,701 | /*
* ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
*
* ARMv7 support: Jean Pihet <jpihet@mvista.com>
* 2010 (c) MontaVista Software, LLC.
*
* Copied from ARMv6 code, with the low level code inspired
* by the ARMv7 Oprofile code.
*
* Cortex-A8 has up to 4 configurable performance counters and
* a single cycle counter.
* Cortex-A9 has up to 31 configurable performance counters and
* a single cycle counter.
*
* All counters can be enabled/disabled and IRQ masked separately. The cycle
* counter and all 4 performance counters together can be reset separately.
*/
#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
* - all branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
ARMV7_PERFCTR_L2_ACCESS = 0x43,
ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
ARMV7_PERFCTR_L2_NEON = 0x4E,
ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
ARMV7_PERFCTR_L1_INST = 0x50,
ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
ARMV7_PERFCTR_OP_EXECUTED = 0x55,
ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
ARMV7_PERFCTR_CYCLES_INST = 0x57,
ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
ARMV7_PERFCTR_DATA_EVICTION = 0x65,
ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
ARMV7_PERFCTR_ISB_INST = 0x90,
ARMV7_PERFCTR_DSB_INST = 0x91,
ARMV7_PERFCTR_DMB_INST = 0x92,
ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
/*
* Cortex-A8 HW events mapping
*
* The hardware events that we support. We do support cache operations but
* we have harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
* If users want the DTLB refill misses, a raw counter
* must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
* If users want the DTLB refill misses, a raw counter
* must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Perf Events counters
*/
enum armv7_counters {
ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
ARMV7_COUNTER0 = 2, /* First event counter */
};
/*
* The cycle counter is ARMV7_CYCLE_COUNTER.
* The first event counter is ARMV7_COUNTER0.
* The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
*/
#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
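/*
 * Illustrative note (not part of the original driver): index 0 of the
 * perf counter space is unused, index 1 is the cycle counter and the
 * event counters start at index 2. As a worked example of the macro
 * above, with armpmu->num_events == 5 ARMV7_COUNTER_LAST evaluates to
 * 2 + 5 - 1 == 6, which is the upper bound used by the validity checks
 * further down in this file.
 */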
/*
* ARMv7 low level PMNC access
*/
/*
* Per-CPU PMNC: config reg
*/
#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK 0x1f
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
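/*
 * Illustrative note (not part of the original driver): a typical
 * bring-up sequence first writes ARMV7_PMNC_P | ARMV7_PMNC_C to reset
 * the event counters and the cycle counter, and then sets ARMV7_PMNC_E
 * to start counting; this mirrors what armv7pmu_reset() and
 * armv7pmu_start() below do.
 */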
/*
* Available counters
*/
#define ARMV7_CNT0 0 /* First event counter */
#define ARMV7_CCNT 31 /* Cycle counter */
/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
/*
* CNTENS: counters enable reg
*/
#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
/*
* CNTENC: counters disable reg
*/
#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
/*
* INTENS: counters overflow interrupt enable reg
*/
#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
/*
* INTENC: counters overflow interrupt disable reg
*/
#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
/*
* EVTSEL: Event selection reg
*/
#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
/*
* SELECT: Counter selection reg
*/
#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
/*
* FLAG: counters overflow flag status reg
*/
#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
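/*
 * Illustrative sketch (not part of the original driver): the *_P(idx)
 * macros above translate a perf counter index into the hardware bit
 * used by the CNTENS/CNTENC/INTENS/INTENC/FLAG registers. The helper
 * below exists only to show that arithmetic; its name is invented for
 * this example and it is not used by the driver.
 */
static inline u32 armv7_example_counter_bits(void)
{
	/* perf index 2 (ARMV7_COUNTER0) selects hardware counter bit 0 */
	u32 first_event_bit = ARMV7_CNTENS_P(ARMV7_COUNTER0);	/* 1 << 0 */
	/* the cycle counter always uses bit 31 (ARMV7_CCNT) */
	u32 cycle_bit = ARMV7_FLAG_C;				/* 1 << 31 */

	return first_event_bit | cycle_bit;
}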
static inline unsigned long armv7_pmnc_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
static inline void armv7_pmnc_write(unsigned long val)
{
val &= ARMV7_PMNC_MASK;
isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
ret = pmnc & ARMV7_FLAG_P(counter);
else
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), counter);
return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
u32 val;
if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
pr_err("CPU%u selecting wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
isb();
return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
unsigned long value = 0;
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mrc p15, 0, %0, c9, c13, 2"
: "=r" (value));
} else
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mcr p15, 0, %0, c9, c13, 2"
: : "r" (value));
} else
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
val &= ARMV7_EVTSEL_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENS_C;
else
val = ARMV7_CNTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENC_C;
else
val = ARMV7_CNTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENS_C;
else
val = ARMV7_INTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENC_C;
else
val = ARMV7_INTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
u32 val;
/* Read */
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
u32 val;
unsigned int cnt;
printk(KERN_INFO "PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
printk(KERN_INFO "PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
printk(KERN_INFO "CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
printk(KERN_INFO "INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
printk(KERN_INFO "FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
printk(KERN_INFO "SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
	 * Set event (if destined for PMNx counters).
	 * We don't need to set the event if it's a cycle count.
*/
if (idx != ARMV7_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
armv7_pmnc_enable_intens(idx);
/*
* Enable counter
*/
armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Disable counter and interrupt
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Disable interrupt for this counter
*/
armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmnc = armv7_pmnc_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
	/* Always place a cycle-counting event into the cycle counter. */
if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV7_CYCLE_COUNTER;
} else {
/*
		 * For anything other than a cycle counter, try to use
		 * the event counters
*/
for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
}
static void armv7pmu_reset(void *info)
{
u32 idx, nb_cnt = armpmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = 1; idx < nb_cnt; ++idx)
armv7pmu_disable_event(NULL, idx);
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
.disable = armv7pmu_disable_event,
.read_counter = armv7pmu_read_counter,
.write_counter = armv7pmu_write_counter,
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = armv7pmu_reset,
.raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
}
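/*
 * Worked example (illustrative only, not part of the original driver):
 * on a core whose PMNC.N field (bits [15:11]) reads back as 4, i.e.
 * four programmable event counters, the expression above yields
 * nb_cnt == 4 and the function returns 5 to account for the
 * always-present cycle counter.
 */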
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
armv7pmu.event_map = &armv7_a8_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
armv7pmu.event_map = &armv7_a9_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V7 */
| /*
* ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
*
* ARMv7 support: Jean Pihet <jpihet@mvista.com>
* 2010 (c) MontaVista Software, LLC.
*
* Copied from ARMv6 code, with the low level code inspired
* by the ARMv7 Oprofile code.
*
* Cortex-A8 has up to 4 configurable performance counters and
* a single cycle counter.
* Cortex-A9 has up to 31 configurable performance counters and
* a single cycle counter.
*
* All counters can be enabled/disabled and IRQ masked separately. The cycle
* counter and all 4 performance counters together can be reset separately.
*/
#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
* - all branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
ARMV7_PERFCTR_L2_ACCESS = 0x43,
ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
ARMV7_PERFCTR_L2_NEON = 0x4E,
ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
ARMV7_PERFCTR_L1_INST = 0x50,
ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
ARMV7_PERFCTR_OP_EXECUTED = 0x55,
ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
ARMV7_PERFCTR_CYCLES_INST = 0x57,
ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
ARMV7_PERFCTR_DATA_EVICTION = 0x65,
ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
ARMV7_PERFCTR_ISB_INST = 0x90,
ARMV7_PERFCTR_DSB_INST = 0x91,
ARMV7_PERFCTR_DMB_INST = 0x92,
ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
/*
* Cortex-A8 HW events mapping
*
 * The hardware events that we support. We do support cache operations, but
 * we have Harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
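/*
 * Illustrative note (not part of the original file): these tables map
 * the generic perf ABI event ids onto the ARMv7 event numbers that are
 * later programmed into a counter's EVTSEL field. For instance, a
 * request for PERF_COUNT_HW_INSTRUCTIONS on the Cortex-A8 ends up
 * selecting event 0x08 (ARMV7_PERFCTR_INSTR_EXECUTED), while entries
 * marked HW_OP_UNSUPPORTED flag events the core cannot count directly.
 */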
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Perf Events counters
*/
enum armv7_counters {
ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
ARMV7_COUNTER0 = 2, /* First event counter */
};
/*
* The cycle counter is ARMV7_CYCLE_COUNTER.
* The first event counter is ARMV7_COUNTER0.
* The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
*/
#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
* ARMv7 low level PMNC access
*/
/*
* Per-CPU PMNC: config reg
*/
#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK 0x1f
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
/*
* Available counters
*/
#define ARMV7_CNT0 0 /* First event counter */
#define ARMV7_CCNT 31 /* Cycle counter */
/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
/*
* CNTENS: counters enable reg
*/
#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
/*
* CNTENC: counters disable reg
*/
#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
/*
* INTENS: counters overflow interrupt enable reg
*/
#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
/*
* INTENC: counters overflow interrupt disable reg
*/
#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
/*
* EVTSEL: Event selection reg
*/
#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
/*
* SELECT: Counter selection reg
*/
#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
/*
* FLAG: counters overflow flag status reg
*/
#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
static inline unsigned long armv7_pmnc_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
static inline void armv7_pmnc_write(unsigned long val)
{
val &= ARMV7_PMNC_MASK;
isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
ret = pmnc & ARMV7_FLAG_P(counter);
else
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), counter);
return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
u32 val;
if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
pr_err("CPU%u selecting wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
isb();
return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
unsigned long value = 0;
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mrc p15, 0, %0, c9, c13, 2"
: "=r" (value));
} else
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mcr p15, 0, %0, c9, c13, 2"
: : "r" (value));
} else
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
val &= ARMV7_EVTSEL_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENS_C;
else
val = ARMV7_CNTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENC_C;
else
val = ARMV7_CNTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENS_C;
else
val = ARMV7_INTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENC_C;
else
val = ARMV7_INTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
u32 val;
/* Read */
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
u32 val;
unsigned int cnt;
printk(KERN_INFO "PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
printk(KERN_INFO "PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
printk(KERN_INFO "CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
printk(KERN_INFO "INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
printk(KERN_INFO "FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
printk(KERN_INFO "SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
	 * Set event (if destined for PMNx counters).
	 * We don't need to set the event if it's a cycle count.
*/
if (idx != ARMV7_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
armv7_pmnc_enable_intens(idx);
/*
* Enable counter
*/
armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Disable counter and interrupt
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Disable interrupt for this counter
*/
armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmnc = armv7_pmnc_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
	/* Always place a cycle-counting event into the cycle counter. */
if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV7_CYCLE_COUNTER;
} else {
/*
		 * For anything other than a cycle counter, try to use
		 * the event counters
*/
for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
}
static void armv7pmu_reset(void *info)
{
u32 idx, nb_cnt = armpmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = 1; idx < nb_cnt; ++idx)
armv7pmu_disable_event(NULL, idx);
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
.disable = armv7pmu_disable_event,
.read_counter = armv7pmu_read_counter,
.write_counter = armv7pmu_write_counter,
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = armv7pmu_reset,
.raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
}
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
armv7pmu.event_map = &armv7_a8_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
armv7pmu.event_map = &armv7_a9_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V7 */
|
223,612,686,966,598 | /*
* ARMv5 [xscale] Performance counter handling code.
*
* Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* Based on the previous xscale OProfile code.
*
* There are two variants of the xscale PMU that we support:
* - xscale1pmu: 2 event counters and a cycle counter
* - xscale2pmu: 4 event counters and a cycle counter
* The two variants share event definitions, but have different
* PMU structures.
*/
#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
XSCALE_PERFCTR_ICACHE_MISS = 0x00,
XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
XSCALE_PERFCTR_DATA_STALL = 0x02,
XSCALE_PERFCTR_ITLB_MISS = 0x03,
XSCALE_PERFCTR_DTLB_MISS = 0x04,
XSCALE_PERFCTR_BRANCH = 0x05,
XSCALE_PERFCTR_BRANCH_MISS = 0x06,
XSCALE_PERFCTR_INSTRUCTION = 0x07,
XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
XSCALE_PERFCTR_PC_CHANGED = 0x0D,
XSCALE_PERFCTR_BCU_REQUEST = 0x10,
XSCALE_PERFCTR_BCU_FULL = 0x11,
XSCALE_PERFCTR_BCU_DRAIN = 0x12,
XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
XSCALE_PERFCTR_RMW = 0x16,
/* XSCALE_PERFCTR_CCNT is not hardware defined */
XSCALE_PERFCTR_CCNT = 0xFE,
XSCALE_PERFCTR_UNUSED = 0xFF,
};
enum xscale_counters {
XSCALE_CYCLE_COUNTER = 1,
XSCALE_COUNTER0,
XSCALE_COUNTER1,
XSCALE_COUNTER2,
XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
#define XSCALE_PMU_ENABLE 0x001
#define XSCALE_PMN_RESET 0x002
#define XSCALE_CCNT_RESET 0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64 0x008
#define XSCALE1_OVERFLOWED_MASK 0x700
#define XSCALE1_CCOUNT_OVERFLOW 0x400
#define XSCALE1_COUNT0_OVERFLOW 0x100
#define XSCALE1_COUNT1_OVERFLOW 0x200
#define XSCALE1_CCOUNT_INT_EN 0x040
#define XSCALE1_COUNT0_INT_EN 0x010
#define XSCALE1_COUNT1_INT_EN 0x020
#define XSCALE1_COUNT0_EVT_SHFT 12
#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT 20
#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
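/*
 * Illustrative sketch (not part of the original driver): the xscale1
 * PMNC packs the event selectors and interrupt enables for both event
 * counters into a single register. The helper below only demonstrates
 * that field layout; its name is invented for this example and it is
 * not used by the driver.
 */
static inline u32 xscale1_example_pmnc_for_branch_on_counter0(void)
{
	/* select XSCALE_PERFCTR_BRANCH (0x05) in the counter 0 event field */
	u32 val = XSCALE_PERFCTR_BRANCH << XSCALE1_COUNT0_EVT_SHFT;

	/* raise an interrupt on counter 0 overflow and enable the PMU */
	val |= XSCALE1_COUNT0_INT_EN | XSCALE_PMU_ENABLE;

	return val;
}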
static inline u32
xscale1pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
return val;
}
static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4 bits and bits 7, 11 are write-as-0 */
val &= 0xffff77f;
asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
	 * NOTE: there's an A stepping erratum: if an overflow bit is
	 *       already set and another overflow occurs, the previously
	 *       set overflow bit is cleared. There's no workaround;
	 *       this is fixed in the B stepping and later.
*/
pmnc = xscale1pmu_read_pmnc();
/*
* Write the value back to clear the overflow flags. Overflow
* flags remain in pmnc for use below. We also disable the PMU
* while we process the interrupt.
*/
xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
return IRQ_NONE;
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = 0;
evt = XSCALE1_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
XSCALE1_COUNT0_INT_EN;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
XSCALE1_COUNT1_INT_EN;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = XSCALE1_CCOUNT_INT_EN;
evt = 0;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
if (XSCALE_PERFCTR_CCNT == event->config_base) {
if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return XSCALE_CYCLE_COUNTER;
} else {
if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
return XSCALE_COUNTER1;
if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
return XSCALE_COUNTER0;
return -EAGAIN;
}
}
static void
xscale1pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale1pmu = {
.id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1",
.handle_irq = xscale1pmu_handle_irq,
.enable = xscale1pmu_enable_event,
.disable = xscale1pmu_disable_event,
.read_counter = xscale1pmu_read_counter,
.write_counter = xscale1pmu_write_counter,
.get_event_idx = xscale1pmu_get_event_idx,
.start = xscale1pmu_start,
.stop = xscale1pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
#define XSCALE2_OVERFLOWED_MASK 0x01f
#define XSCALE2_CCOUNT_OVERFLOW 0x001
#define XSCALE2_COUNT0_OVERFLOW 0x002
#define XSCALE2_COUNT1_OVERFLOW 0x004
#define XSCALE2_COUNT2_OVERFLOW 0x008
#define XSCALE2_COUNT3_OVERFLOW 0x010
#define XSCALE2_CCOUNT_INT_EN 0x001
#define XSCALE2_COUNT0_INT_EN 0x002
#define XSCALE2_COUNT1_INT_EN 0x004
#define XSCALE2_COUNT2_INT_EN 0x008
#define XSCALE2_COUNT3_INT_EN 0x010
#define XSCALE2_COUNT0_EVT_SHFT 0
#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT 8
#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT 16
#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT 24
#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
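/*
 * Illustrative sketch (not part of the original driver): unlike the
 * xscale1 PMU, xscale2 keeps the per-counter event selectors and the
 * interrupt enables in separate registers, so enabling an event is a
 * read-modify-write of each. The helpers below only demonstrate the
 * field layout for counter 2; their names are invented for this
 * example and they are not used by the driver.
 */
static inline u32 xscale2_example_evtsel_for_counter2(void)
{
	/* place XSCALE_PERFCTR_DCACHE_MISS (0x0B) in the counter 2 field */
	return XSCALE_PERFCTR_DCACHE_MISS << XSCALE2_COUNT2_EVT_SHFT;
}

static inline u32 xscale2_example_inten_for_counter2(void)
{
	/* the matching interrupt enable bit for counter 2 */
	return XSCALE2_COUNT2_INT_EN;
}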
static inline u32
xscale2pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
/* bits 1-2 and 4-23 are read-unpredictable */
return val & 0xff000009;
}
static inline void
xscale2pmu_write_pmnc(u32 val)
{
/* bits 4-23 are write-as-0, 24-31 are write ignored */
val &= 0xf;
asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_overflow_flags(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_event_select(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_event_select(u32 val)
{
asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
static inline u32
xscale2pmu_read_int_enable(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
return val;
}
static void
xscale2pmu_write_int_enable(u32 val)
{
asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/* Disable the PMU. */
pmnc = xscale2pmu_read_pmnc();
xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
/* Check the overflow flag register. */
of_flags = xscale2pmu_read_overflow_flags();
if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
return IRQ_NONE;
/* Clear the overflow bits. */
xscale2pmu_write_overflow_flags(of_flags);
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien |= XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien |= XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien |= XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien |= XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien |= XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
if (idx >= 0)
goto out;
if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
idx = XSCALE_COUNTER3;
else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
idx = XSCALE_COUNTER2;
out:
return idx;
}
static void
xscale2pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale2pmu = {
.id = ARM_PERF_PMU_ID_XSCALE2,
.name = "xscale2",
.handle_irq = xscale2pmu_handle_irq,
.enable = xscale2pmu_enable_event,
.disable = xscale2pmu_disable_event,
.read_counter = xscale2pmu_read_counter,
.write_counter = xscale2pmu_write_counter,
.get_event_idx = xscale2pmu_get_event_idx,
.start = xscale2pmu_start,
.stop = xscale2pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 5,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_XSCALE */
| /*
* ARMv5 [xscale] Performance counter handling code.
*
* Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* Based on the previous xscale OProfile code.
*
* There are two variants of the xscale PMU that we support:
* - xscale1pmu: 2 event counters and a cycle counter
* - xscale2pmu: 4 event counters and a cycle counter
* The two variants share event definitions, but have different
* PMU structures.
*/
#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
XSCALE_PERFCTR_ICACHE_MISS = 0x00,
XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
XSCALE_PERFCTR_DATA_STALL = 0x02,
XSCALE_PERFCTR_ITLB_MISS = 0x03,
XSCALE_PERFCTR_DTLB_MISS = 0x04,
XSCALE_PERFCTR_BRANCH = 0x05,
XSCALE_PERFCTR_BRANCH_MISS = 0x06,
XSCALE_PERFCTR_INSTRUCTION = 0x07,
XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
XSCALE_PERFCTR_PC_CHANGED = 0x0D,
XSCALE_PERFCTR_BCU_REQUEST = 0x10,
XSCALE_PERFCTR_BCU_FULL = 0x11,
XSCALE_PERFCTR_BCU_DRAIN = 0x12,
XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
XSCALE_PERFCTR_RMW = 0x16,
/* XSCALE_PERFCTR_CCNT is not hardware defined */
XSCALE_PERFCTR_CCNT = 0xFE,
XSCALE_PERFCTR_UNUSED = 0xFF,
};
enum xscale_counters {
XSCALE_CYCLE_COUNTER = 1,
XSCALE_COUNTER0,
XSCALE_COUNTER1,
XSCALE_COUNTER2,
XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
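/*
 * Worked example (illustrative only; the actual lookup happens in the
 * shared ARM perf_event code, not in this file): a generic cache event
 * encoded as C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 * indexes xscale_perf_cache_map[C(L1D)][C(OP_READ)][C(RESULT_MISS)]
 * and resolves to XSCALE_PERFCTR_DCACHE_MISS (0x0B), while a plain
 * PERF_COUNT_HW_CPU_CYCLES event resolves via xscale_perf_map to the
 * pseudo-event XSCALE_PERFCTR_CCNT, which the get_event_idx routines
 * below steer onto the dedicated cycle counter.
 */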
#define XSCALE_PMU_ENABLE 0x001
#define XSCALE_PMN_RESET 0x002
#define XSCALE_CCNT_RESET 0x004
#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64 0x008
#define XSCALE1_OVERFLOWED_MASK 0x700
#define XSCALE1_CCOUNT_OVERFLOW 0x400
#define XSCALE1_COUNT0_OVERFLOW 0x100
#define XSCALE1_COUNT1_OVERFLOW 0x200
#define XSCALE1_CCOUNT_INT_EN 0x040
#define XSCALE1_COUNT0_INT_EN 0x010
#define XSCALE1_COUNT1_INT_EN 0x020
#define XSCALE1_COUNT0_EVT_SHFT 12
#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT 20
#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
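/*
 * For orientation, the xscale1 PMNC packs all PMU state into a single
 * register; the definitions above correspond to this layout:
 *
 * bit 0 PMU enable (XSCALE_PMU_ENABLE)
 * bits 4-6 interrupt enables for counter 0, counter 1, CCNT
 * bits 8-10 overflow flags for counter 0, counter 1, CCNT
 * bits 12-19 event select for counter 0
 * bits 20-27 event select for counter 1
 */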
static inline u32
xscale1pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
return val;
}
static inline void
xscale1pmu_write_pmnc(u32 val)
{
/* the upper 4 bits and bits 7 and 11 are write-as-0 */
val &= 0xffff77f;
asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* NOTE: there's an A stepping erratum that states if an overflow
* bit already exists and another occurs, the previous
* Overflow bit gets cleared. There's no workaround.
* Fixed in B stepping or later.
*/
pmnc = xscale1pmu_read_pmnc();
/*
* Write the value back to clear the overflow flags. Overflow
* flags remain in pmnc for use below. We also disable the PMU
* while we process the interrupt.
*/
xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
return IRQ_NONE;
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = 0;
evt = XSCALE1_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
XSCALE1_COUNT0_INT_EN;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
XSCALE1_COUNT1_INT_EN;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = XSCALE1_CCOUNT_INT_EN;
evt = 0;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
if (XSCALE_PERFCTR_CCNT == event->config_base) {
if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return XSCALE_CYCLE_COUNTER;
} else {
if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
return XSCALE_COUNTER1;
if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
return XSCALE_COUNTER0;
return -EAGAIN;
}
}
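/*
 * Allocation policy: cycle-count events get the dedicated CCNT, and
 * everything else is handed a programmable counter, trying COUNTER1
 * before COUNTER0. The xscale2 variant reuses this routine and falls
 * back to counters 3 and 2 (see xscale2pmu_get_event_idx below).
 */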
static void
xscale1pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale1pmu = {
.id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1",
.handle_irq = xscale1pmu_handle_irq,
.enable = xscale1pmu_enable_event,
.disable = xscale1pmu_disable_event,
.read_counter = xscale1pmu_read_counter,
.write_counter = xscale1pmu_write_counter,
.get_event_idx = xscale1pmu_get_event_idx,
.start = xscale1pmu_start,
.stop = xscale1pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
#define XSCALE2_OVERFLOWED_MASK 0x01f
#define XSCALE2_CCOUNT_OVERFLOW 0x001
#define XSCALE2_COUNT0_OVERFLOW 0x002
#define XSCALE2_COUNT1_OVERFLOW 0x004
#define XSCALE2_COUNT2_OVERFLOW 0x008
#define XSCALE2_COUNT3_OVERFLOW 0x010
#define XSCALE2_CCOUNT_INT_EN 0x001
#define XSCALE2_COUNT0_INT_EN 0x002
#define XSCALE2_COUNT1_INT_EN 0x004
#define XSCALE2_COUNT2_INT_EN 0x008
#define XSCALE2_COUNT3_INT_EN 0x010
#define XSCALE2_COUNT0_EVT_SHFT 0
#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT 8
#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT 16
#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT 24
#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
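/*
 * Note the contrast with xscale1: the xscale2 PMU splits its state
 * across separate CP14 registers (event select, interrupt enable and
 * overflow flag, accessed by the helpers below), so the same numeric
 * value can legitimately appear twice above -- 0x001 is the CCNT
 * overflow bit in the flag register but the CCNT interrupt enable in
 * the interrupt enable register.
 */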
static inline u32
xscale2pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
/* bits 1-2 and 4-23 are read-unpredictable */
return val & 0xff000009;
}
static inline void
xscale2pmu_write_pmnc(u32 val)
{
/* bits 4-23 are write-as-0, 24-31 are write ignored */
val &= 0xf;
asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_overflow_flags(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_event_select(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_event_select(u32 val)
{
asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
static inline u32
xscale2pmu_read_int_enable(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
return val;
}
static void
xscale2pmu_write_int_enable(u32 val)
{
asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/* Disable the PMU. */
pmnc = xscale2pmu_read_pmnc();
xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
/* Check the overflow flag register. */
of_flags = xscale2pmu_read_overflow_flags();
if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
return IRQ_NONE;
/* Clear the overflow bits. */
xscale2pmu_write_overflow_flags(of_flags);
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien |= XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien |= XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien |= XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien |= XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien |= XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
if (idx >= 0)
goto out;
if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
idx = XSCALE_COUNTER3;
else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
idx = XSCALE_COUNTER2;
out:
return idx;
}
static void
xscale2pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale2pmu = {
.id = ARM_PERF_PMU_ID_XSCALE2,
.name = "xscale2",
.handle_irq = xscale2pmu_handle_irq,
.enable = xscale2pmu_enable_event,
.disable = xscale2pmu_disable_event,
.read_counter = xscale2pmu_read_counter,
.write_counter = xscale2pmu_write_counter,
.get_event_idx = xscale2pmu_get_event_idx,
.start = xscale2pmu_start,
.stop = xscale2pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 5,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_XSCALE */
|
150,003,075,086,849 | /*
* linux/arch/arm/kernel/ptrace.c
*
* By Ross Biro 1/23/92
* edited by Linus Torvalds
* ARM modifications Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#define REG_PC 15
#define REG_PSR 16
/*
 * does not yet catch signals sent when the child dies;
 * that would have to be done in exit.c or in signal.c.
*/
#if 0
/*
* Breakpoint SWI instruction: SWI &9F0001
*/
#define BREAKINST_ARM 0xef9f0001
#define BREAKINST_THUMB 0xdf00 /* fill this in later */
#else
/*
* New breakpoints - use an undefined instruction. The ARM architecture
* reference manual guarantees that the following instruction space
* will produce an undefined instruction exception on all CPUs:
*
* ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
* Thumb: 1101 1110 xxxx xxxx
*/
#define BREAKINST_ARM 0xe7f001f0
#define BREAKINST_THUMB 0xde01
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) \
{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(r1),
REG_OFFSET_NAME(r2),
REG_OFFSET_NAME(r3),
REG_OFFSET_NAME(r4),
REG_OFFSET_NAME(r5),
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(ip),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(lr),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(cpsr),
REG_OFFSET_NAME(ORIG_r0),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
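/*
 * Example: regs_get_kernel_stack_nth(regs, 0) returns the word at the
 * saved kernel stack pointer itself and n == 1 the word above it;
 * anything that walks off the THREAD_SIZE-aligned stack area reads
 * back as 0 rather than faulting.
 */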
/*
 * this routine will get a word off the process's privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
* data space.
*/
static inline long get_user_reg(struct task_struct *task, int offset)
{
return task_pt_regs(task)->uregs[offset];
}
/*
 * this routine will put a word on the process's privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
* data space.
*/
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
struct pt_regs newregs, *regs = task_pt_regs(task);
int ret = -EINVAL;
newregs = *regs;
newregs.uregs[offset] = data;
if (valid_user_regs(&newregs)) {
regs->uregs[offset] = data;
ret = 0;
}
return ret;
}
/*
 * Called by kernel/ptrace.c when detaching.
*/
void ptrace_disable(struct task_struct *child)
{
/* Nothing to do. */
}
/*
* Handle hitting a breakpoint.
*/
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
ptrace_break(current, regs);
return 0;
}
static struct undef_hook arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = 0x07f001f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = break_trap,
};
static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xde01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
};
static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
{
unsigned int instr2;
void __user *pc;
/* Check the second half of the instruction. */
pc = (void __user *)(instruction_pointer(regs) + 2);
if (processor_mode(regs) == SVC_MODE) {
instr2 = *(u16 *) pc;
} else {
get_user(instr2, (u16 __user *)pc);
}
if (instr2 == 0xa000) {
ptrace_break(current, regs);
return 0;
} else {
return 1;
}
}
static struct undef_hook thumb2_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xf7f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = thumb2_break_trap,
};
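/*
 * The hooks above recognise the full 32-bit Thumb-2 breakpoint
 * encoding 0xf7f0a000 in two halves: the undef hook matches the
 * leading halfword 0xf7f0 and thumb2_break_trap then fetches and
 * checks the trailing 0xa000 before reporting the breakpoint.
 */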
static int __init ptrace_break_init(void)
{
register_undef_hook(&arm_break_hook);
register_undef_hook(&thumb_break_hook);
register_undef_hook(&thumb2_break_hook);
return 0;
}
core_initcall(ptrace_break_init);
/*
 * Read the word at offset "off" from the "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
unsigned long __user *ret)
{
unsigned long tmp;
if (off & 3 || off >= sizeof(struct user))
return -EIO;
tmp = 0;
if (off == PT_TEXT_ADDR)
tmp = tsk->mm->start_code;
else if (off == PT_DATA_ADDR)
tmp = tsk->mm->start_data;
else if (off == PT_TEXT_END_ADDR)
tmp = tsk->mm->end_code;
else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
return put_user(tmp, ret);
}
/*
* Write the word at offset "off" into "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
unsigned long val)
{
if (off & 3 || off >= sizeof(struct user))
return -EIO;
if (off >= sizeof(struct pt_regs))
return 0;
return put_user_reg(tsk, off >> 2, val);
}
#ifdef CONFIG_IWMMXT
/*
* Get the child iWMMXt state.
*/
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -ENODATA;
iwmmxt_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
? -EFAULT : 0;
}
/*
* Set the child iWMMXt state.
*/
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -EACCES;
iwmmxt_task_release(thread); /* force a reload */
return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_CRUNCH
/*
* Get the child Crunch state.
*/
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
? -EFAULT : 0;
}
/*
* Set the child Crunch state.
*/
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_release(thread); /* force a reload */
return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Convert a virtual register number into an index for a thread_info
* breakpoint array. Breakpoints are identified using positive numbers
* whilst watchpoints are negative. The registers are laid out as pairs
* of (address, control), each pair mapping to a unique hw_breakpoint struct.
* Register 0 is reserved for describing resource information.
*/
static int ptrace_hbp_num_to_idx(long num)
{
if (num < 0)
num = (ARM_MAX_BRP << 1) - num;
return (num - 1) >> 1;
}
/*
* Returns the virtual register number for the address of the
* breakpoint at index idx.
*/
static long ptrace_hbp_idx_to_num(int idx)
{
long mid = ARM_MAX_BRP << 1;
long num = (idx << 1) + 1;
return num > mid ? mid - num : num;
}
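/*
 * Worked example, assuming ARM_MAX_BRP == 2 purely for illustration:
 * breakpoint registers are num 1/2 (addr/ctrl -> idx 0) and num 3/4
 * (-> idx 1); watchpoint registers are num -1/-2 (-> idx 2) and num
 * -3/-4 (-> idx 3). Going the other way, ptrace_hbp_idx_to_num maps
 * idx 0 -> 1, idx 1 -> 3, idx 2 -> -1 and idx 3 -> -3.
 */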
/*
* Handle hitting a HW-breakpoint.
*/
static void ptrace_hbptriggered(struct perf_event *bp, int unused,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
break;
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
info.si_signo = SIGTRAP;
info.si_errno = (int)num;
info.si_code = TRAP_HWBKPT;
info.si_addr = (void __user *)(bkpt->trigger);
force_sig_info(SIGTRAP, &info, current);
}
/*
* Set ptrace breakpoint pointers to zero for this task.
* This is required in order to prevent child processes from unregistering
* breakpoints held by their parent.
*/
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
/*
* Unregister breakpoints from this task and reset the pointers in
* the thread_struct.
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
if (t->debug.hbp[i]) {
unregister_hw_breakpoint(t->debug.hbp[i]);
t->debug.hbp[i] = NULL;
}
}
}
static u32 ptrace_get_hbp_resource_info(void)
{
u8 num_brps, num_wrps, debug_arch, wp_len;
u32 reg = 0;
num_brps = hw_breakpoint_slots(TYPE_INST);
num_wrps = hw_breakpoint_slots(TYPE_DATA);
debug_arch = arch_get_debug_arch();
wp_len = arch_get_max_wp_len();
reg |= debug_arch;
reg <<= 8;
reg |= wp_len;
reg <<= 8;
reg |= num_wrps;
reg <<= 8;
reg |= num_brps;
return reg;
}
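/*
 * Layout of the word built above, most significant byte first:
 * byte 3: debug architecture version
 * byte 2: maximum supported watchpoint length
 * byte 1: number of watchpoint slots
 * byte 0: number of breakpoint slots
 * This is what userspace receives from PTRACE_GETHBPREGS with num == 0.
 */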
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
struct perf_event_attr attr;
ptrace_breakpoint_init(&attr);
/* Initialise fields to sane defaults. */
attr.bp_addr = 0;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = type;
attr.disabled = 1;
return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
}
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
u32 reg;
int idx, ret = 0;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl arch_ctrl;
if (num == 0) {
reg = ptrace_get_hbp_resource_info();
} else {
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
reg = 0;
goto put;
}
arch_ctrl = counter_arch_bp(bp)->ctrl;
/*
* Fix up the len because we may have adjusted it
* to compensate for an unaligned address.
*/
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
if (num & 0x1)
reg = bp->attr.bp_addr;
else
reg = encode_ctrl_reg(arch_ctrl);
}
put:
if (put_user(reg, data))
ret = -EFAULT;
out:
return ret;
}
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
int idx, gen_len, gen_type, implied_type, ret = 0;
u32 user_val;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl ctrl;
struct perf_event_attr attr;
if (num == 0)
goto out;
else if (num < 0)
implied_type = HW_BREAKPOINT_RW;
else
implied_type = HW_BREAKPOINT_X;
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
if (get_user(user_val, data)) {
ret = -EFAULT;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
bp = ptrace_hbp_create(tsk, implied_type);
if (IS_ERR(bp)) {
ret = PTR_ERR(bp);
goto out;
}
tsk->thread.debug.hbp[idx] = bp;
}
attr = bp->attr;
if (num & 0x1) {
/* Address */
attr.bp_addr = user_val;
} else {
/* Control */
decode_ctrl_reg(user_val, &ctrl);
ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
if (ret)
goto out;
if ((gen_type & implied_type) != gen_type) {
ret = -EINVAL;
goto out;
}
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = !ctrl.enabled;
}
ret = modify_user_hw_breakpoint(bp, &attr);
out:
return ret;
}
#endif
/* regset get/set implementations */
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs,
0, sizeof(*regs));
}
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
0, sizeof(newregs));
if (ret)
return ret;
if (!valid_user_regs(&newregs))
return -EINVAL;
*task_pt_regs(target) = newregs;
return 0;
}
static int fpa_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&task_thread_info(target)->fpstate,
0, sizeof(struct user_fp));
}
static int fpa_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct thread_info *thread = task_thread_info(target);
thread->used_cp[1] = thread->used_cp[2] = 1;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&thread->fpstate,
0, sizeof(struct user_fp));
}
#ifdef CONFIG_VFP
/*
* VFP register get/set implementations.
*
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
* These are transferred to/from the real registers in the task's
* vfp_hard_struct. The number of registers depends on the kernel
* configuration.
*
* 16 or 0 fake VFP registers (d16-d31 or empty)
* i.e., the user_vfp structure has space for 32 registers even if
* the kernel doesn't have them all.
*
* vfp_get() reads this chunk as zero where applicable
* vfp_set() ignores this chunk
*
* 1 word for the FPSCR
*
* The bounds-checking logic built into user_regset_copyout and friends
* means that we can make a simple sequence of calls to map the relevant data
* to/from the specified slice of the user regset structure.
*/
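/*
 * Concretely, assuming the usual struct user_vfp layout of 32 u64
 * fpregs followed by a u32 fpscr: fpregs occupy user offsets 0-255,
 * fpscr sits at offset 256, ARM_VFPREGS_SIZE works out to 260 bytes
 * and the VFP regset below exposes it as 65 word-sized slots.
 */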
static int vfp_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(vfp->fpregs));
if (ret)
return ret;
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(vfp->fpregs),
user_fpscr_offset);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(vfp->fpscr));
}
/*
* For vfp_set() a read-modify-write is done on the VFP registers,
* in order to avoid writing back a half-modified set of registers on
* failure.
*/
static int vfp_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(new_vfp.fpregs));
if (ret)
return ret;
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(new_vfp.fpregs),
user_fpscr_offset);
if (ret)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(new_vfp.fpscr));
if (ret)
return ret;
vfp_sync_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
return 0;
}
#endif /* CONFIG_VFP */
enum arm_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_VFP
REGSET_VFP,
#endif
};
static const struct user_regset arm_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
.set = gpr_set
},
[REGSET_FPR] = {
/*
* For the FPA regs in fpstate, the real fields are a mixture
* of sizes, so pretend that the registers are word-sized:
*/
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fp) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = fpa_get,
.set = fpa_set
},
#ifdef CONFIG_VFP
[REGSET_VFP] = {
/*
* Pretend that the VFP regs are word-sized, since the FPSCR is
* a single word dangling at the end of struct user_vfp:
*/
.core_note_type = NT_ARM_VFP,
.n = ARM_VFPREGS_SIZE / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = vfp_get,
.set = vfp_set
},
#endif /* CONFIG_VFP */
};
static const struct user_regset_view user_arm_view = {
.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_arm_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
case PTRACE_GETREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_GETFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
case PTRACE_SETFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
datap);
break;
case PTRACE_SET_SYSCALL:
task_thread_info(child)->syscall = data;
ret = 0;
break;
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
case PTRACE_SETVFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
case PTRACE_GETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_gethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
case PTRACE_SETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_sethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
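/*
 * Tracer-side sketch (illustration only, hence compiled out): how the
 * PTRACE_PEEKUSR request handled above looks from userspace. Offsets
 * into "struct user" are register numbers scaled by 4, so the saved
 * PC (register 15) lives at offset 60; ptrace_read_user above rejects
 * unaligned or out-of-range offsets with -EIO.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
static long peek_child_pc(pid_t pid)
{
/* addr = register number * 4 */
return ptrace(PTRACE_PEEKUSR, pid, (void *)(15 * 4), 0);
}
#endif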
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
if (!(current->ptrace & PT_PTRACED))
return scno;
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;
current_thread_info()->syscall = scno;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
regs->ARM_ip = ip;
return current_thread_info()->syscall;
}
| /*
* linux/arch/arm/kernel/ptrace.c
*
* By Ross Biro 1/23/92
* edited by Linus Torvalds
* ARM modifications Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#define REG_PC 15
#define REG_PSR 16
/*
 * does not yet catch signals sent when the child dies;
 * that would have to be done in exit.c or in signal.c.
*/
#if 0
/*
* Breakpoint SWI instruction: SWI &9F0001
*/
#define BREAKINST_ARM 0xef9f0001
#define BREAKINST_THUMB 0xdf00 /* fill this in later */
#else
/*
* New breakpoints - use an undefined instruction. The ARM architecture
* reference manual guarantees that the following instruction space
* will produce an undefined instruction exception on all CPUs:
*
* ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
* Thumb: 1101 1110 xxxx xxxx
*/
#define BREAKINST_ARM 0xe7f001f0
#define BREAKINST_THUMB 0xde01
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) \
{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(r1),
REG_OFFSET_NAME(r2),
REG_OFFSET_NAME(r3),
REG_OFFSET_NAME(r4),
REG_OFFSET_NAME(r5),
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(ip),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(lr),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(cpsr),
REG_OFFSET_NAME(ORIG_r0),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
/*
 * this routine will get a word off the process's privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
* data space.
*/
static inline long get_user_reg(struct task_struct *task, int offset)
{
return task_pt_regs(task)->uregs[offset];
}
/*
 * this routine will put a word on the process's privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
* data space.
*/
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
struct pt_regs newregs, *regs = task_pt_regs(task);
int ret = -EINVAL;
newregs = *regs;
newregs.uregs[offset] = data;
if (valid_user_regs(&newregs)) {
regs->uregs[offset] = data;
ret = 0;
}
return ret;
}
/*
 * Called by kernel/ptrace.c when detaching.
*/
void ptrace_disable(struct task_struct *child)
{
/* Nothing to do. */
}
/*
* Handle hitting a breakpoint.
*/
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
ptrace_break(current, regs);
return 0;
}
static struct undef_hook arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = 0x07f001f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = break_trap,
};
static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xde01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
};
static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
{
unsigned int instr2;
void __user *pc;
/* Check the second half of the instruction. */
pc = (void __user *)(instruction_pointer(regs) + 2);
if (processor_mode(regs) == SVC_MODE) {
instr2 = *(u16 *) pc;
} else {
get_user(instr2, (u16 __user *)pc);
}
if (instr2 == 0xa000) {
ptrace_break(current, regs);
return 0;
} else {
return 1;
}
}
static struct undef_hook thumb2_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xf7f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = thumb2_break_trap,
};
static int __init ptrace_break_init(void)
{
register_undef_hook(&arm_break_hook);
register_undef_hook(&thumb_break_hook);
register_undef_hook(&thumb2_break_hook);
return 0;
}
core_initcall(ptrace_break_init);
/*
 * Read the word at offset "off" from the "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
unsigned long __user *ret)
{
unsigned long tmp;
if (off & 3 || off >= sizeof(struct user))
return -EIO;
tmp = 0;
if (off == PT_TEXT_ADDR)
tmp = tsk->mm->start_code;
else if (off == PT_DATA_ADDR)
tmp = tsk->mm->start_data;
else if (off == PT_TEXT_END_ADDR)
tmp = tsk->mm->end_code;
else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
return put_user(tmp, ret);
}
/*
* Write the word at offset "off" into "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
unsigned long val)
{
if (off & 3 || off >= sizeof(struct user))
return -EIO;
if (off >= sizeof(struct pt_regs))
return 0;
return put_user_reg(tsk, off >> 2, val);
}
#ifdef CONFIG_IWMMXT
/*
* Get the child iWMMXt state.
*/
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -ENODATA;
iwmmxt_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
? -EFAULT : 0;
}
/*
* Set the child iWMMXt state.
*/
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -EACCES;
iwmmxt_task_release(thread); /* force a reload */
return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_CRUNCH
/*
* Get the child Crunch state.
*/
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
? -EFAULT : 0;
}
/*
* Set the child Crunch state.
*/
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_release(thread); /* force a reload */
return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Convert a virtual register number into an index for a thread_info
* breakpoint array. Breakpoints are identified using positive numbers
* whilst watchpoints are negative. The registers are laid out as pairs
* of (address, control), each pair mapping to a unique hw_breakpoint struct.
* Register 0 is reserved for describing resource information.
*/
static int ptrace_hbp_num_to_idx(long num)
{
if (num < 0)
num = (ARM_MAX_BRP << 1) - num;
return (num - 1) >> 1;
}
/*
* Returns the virtual register number for the address of the
* breakpoint at index idx.
*/
static long ptrace_hbp_idx_to_num(int idx)
{
long mid = ARM_MAX_BRP << 1;
long num = (idx << 1) + 1;
return num > mid ? mid - num : num;
}
/*
* Handle hitting a HW-breakpoint.
*/
static void ptrace_hbptriggered(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
break;
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
info.si_signo = SIGTRAP;
info.si_errno = (int)num;
info.si_code = TRAP_HWBKPT;
info.si_addr = (void __user *)(bkpt->trigger);
force_sig_info(SIGTRAP, &info, current);
}
/*
* Set ptrace breakpoint pointers to zero for this task.
* This is required in order to prevent child processes from unregistering
* breakpoints held by their parent.
*/
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
/*
* Unregister breakpoints from this task and reset the pointers in
* the thread_struct.
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
if (t->debug.hbp[i]) {
unregister_hw_breakpoint(t->debug.hbp[i]);
t->debug.hbp[i] = NULL;
}
}
}
static u32 ptrace_get_hbp_resource_info(void)
{
u8 num_brps, num_wrps, debug_arch, wp_len;
u32 reg = 0;
num_brps = hw_breakpoint_slots(TYPE_INST);
num_wrps = hw_breakpoint_slots(TYPE_DATA);
debug_arch = arch_get_debug_arch();
wp_len = arch_get_max_wp_len();
reg |= debug_arch;
reg <<= 8;
reg |= wp_len;
reg <<= 8;
reg |= num_wrps;
reg <<= 8;
reg |= num_brps;
return reg;
}
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
struct perf_event_attr attr;
ptrace_breakpoint_init(&attr);
/* Initialise fields to sane defaults. */
attr.bp_addr = 0;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = type;
attr.disabled = 1;
return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
}
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
u32 reg;
int idx, ret = 0;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl arch_ctrl;
if (num == 0) {
reg = ptrace_get_hbp_resource_info();
} else {
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
reg = 0;
goto put;
}
arch_ctrl = counter_arch_bp(bp)->ctrl;
/*
* Fix up the len because we may have adjusted it
* to compensate for an unaligned address.
*/
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
if (num & 0x1)
reg = bp->attr.bp_addr;
else
reg = encode_ctrl_reg(arch_ctrl);
}
put:
if (put_user(reg, data))
ret = -EFAULT;
out:
return ret;
}
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
int idx, gen_len, gen_type, implied_type, ret = 0;
u32 user_val;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl ctrl;
struct perf_event_attr attr;
if (num == 0)
goto out;
else if (num < 0)
implied_type = HW_BREAKPOINT_RW;
else
implied_type = HW_BREAKPOINT_X;
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
if (get_user(user_val, data)) {
ret = -EFAULT;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
bp = ptrace_hbp_create(tsk, implied_type);
if (IS_ERR(bp)) {
ret = PTR_ERR(bp);
goto out;
}
tsk->thread.debug.hbp[idx] = bp;
}
attr = bp->attr;
if (num & 0x1) {
/* Address */
attr.bp_addr = user_val;
} else {
/* Control */
decode_ctrl_reg(user_val, &ctrl);
ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
if (ret)
goto out;
if ((gen_type & implied_type) != gen_type) {
ret = -EINVAL;
goto out;
}
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = !ctrl.enabled;
}
ret = modify_user_hw_breakpoint(bp, &attr);
out:
return ret;
}
#endif
/* regset get/set implementations */
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs,
0, sizeof(*regs));
}
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
0, sizeof(newregs));
if (ret)
return ret;
if (!valid_user_regs(&newregs))
return -EINVAL;
*task_pt_regs(target) = newregs;
return 0;
}
static int fpa_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&task_thread_info(target)->fpstate,
0, sizeof(struct user_fp));
}
static int fpa_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct thread_info *thread = task_thread_info(target);
thread->used_cp[1] = thread->used_cp[2] = 1;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&thread->fpstate,
0, sizeof(struct user_fp));
}
#ifdef CONFIG_VFP
/*
* VFP register get/set implementations.
*
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
* These are transferred to/from the real registers in the task's
* vfp_hard_struct. The number of registers depends on the kernel
* configuration.
*
* 16 or 0 fake VFP registers (d16-d31 or empty)
* i.e., the user_vfp structure has space for 32 registers even if
* the kernel doesn't have them all.
*
* vfp_get() reads this chunk as zero where applicable
* vfp_set() ignores this chunk
*
* 1 word for the FPSCR
*
* The bounds-checking logic built into user_regset_copyout and friends
* means that we can make a simple sequence of calls to map the relevant data
* to/from the specified slice of the user regset structure.
*/
static int vfp_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(vfp->fpregs));
if (ret)
return ret;
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(vfp->fpregs),
user_fpscr_offset);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(vfp->fpscr));
}
/*
* For vfp_set() a read-modify-write is done on the VFP registers,
* in order to avoid writing back a half-modified set of registers on
* failure.
*/
static int vfp_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(new_vfp.fpregs));
if (ret)
return ret;
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(new_vfp.fpregs),
user_fpscr_offset);
if (ret)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(new_vfp.fpscr));
if (ret)
return ret;
vfp_sync_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
return 0;
}
#endif /* CONFIG_VFP */
enum arm_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_VFP
REGSET_VFP,
#endif
};
static const struct user_regset arm_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
.set = gpr_set
},
[REGSET_FPR] = {
/*
* For the FPA regs in fpstate, the real fields are a mixture
* of sizes, so pretend that the registers are word-sized:
*/
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fp) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = fpa_get,
.set = fpa_set
},
#ifdef CONFIG_VFP
[REGSET_VFP] = {
/*
* Pretend that the VFP regs are word-sized, since the FPSCR is
* a single word dangling at the end of struct user_vfp:
*/
.core_note_type = NT_ARM_VFP,
.n = ARM_VFPREGS_SIZE / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = vfp_get,
.set = vfp_set
},
#endif /* CONFIG_VFP */
};
static const struct user_regset_view user_arm_view = {
.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_arm_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
case PTRACE_GETREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_GETFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
case PTRACE_SETFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
datap);
break;
case PTRACE_SET_SYSCALL:
task_thread_info(child)->syscall = data;
ret = 0;
break;
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
case PTRACE_SETVFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
case PTRACE_GETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_gethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
case PTRACE_SETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_sethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
if (!(current->ptrace & PT_PTRACED))
return scno;
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;
current_thread_info()->syscall = scno;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
regs->ARM_ip = ip;
return current_thread_info()->syscall;
}
|
62,161,942,600,979 | /*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
*
* Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
* Where: Rt = destination
* Rt2 = source
* Rn = address
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
/*
* Error-checking SWP macros implemented using ldrex{b}/strex{b}
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 0b, 3b\n" \
" .long 1b, 3b\n" \
" .previous" \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "cc", "memory")
#define __user_swp_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "b")
/*
* Macros/defines for extracting register numbers from instruction.
*/
#define EXTRACT_REG_NUM(instruction, offset) \
(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET 16
#define RT_OFFSET 12
#define RT2_OFFSET 0
/*
* Bit 22 of the instruction encoding distinguishes between
* the SWP and SWPB variants (bit set means SWPB).
*/
#define TYPE_SWPB (1 << 22)
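/*
 * Worked example (editor's addition, not part of the original source):
 * the encoding 0xe1002091 is "SWP r2, r1, [r0]". With the macros above:
 *   EXTRACT_REG_NUM(0xe1002091, RN_OFFSET)  == 0  (Rn  = r0, address)
 *   EXTRACT_REG_NUM(0xe1002091, RT_OFFSET)  == 2  (Rt  = r2, destination)
 *   EXTRACT_REG_NUM(0xe1002091, RT2_OFFSET) == 1  (Rt2 = r1, source)
 *   0xe1002091 & TYPE_SWPB                  == 0  (word-sized SWP, not SWPB)
 * Setting bit 22 (0xe1402091) would select the byte variant, SWPB.
 */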
static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t previous_pid;
#ifdef CONFIG_PROC_FS
static int proc_read_status(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *p = page;
int len;
p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
if (previous_pid != 0)
p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
len = (p - page) - off;
if (len < 0)
len = 0;
*eof = (len <= count) ? 1 : 0;
*start = page + off;
return len;
}
#endif
/*
* Set up process info to signal segmentation fault - called on access error.
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) instruction_pointer(regs);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs, &info, 0, 0);
abtcounter++;
}
static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
unsigned int res = 0;
if ((type != TYPE_SWPB) && (address & 0x3)) {
/* SWP to unaligned address not permitted */
pr_debug("SWP instruction on unaligned pointer!\n");
return -EFAULT;
}
while (1) {
unsigned long temp;
/*
* A barrier is required between accessing a protected resource and
* releasing a lock for it. Legacy code might not have issued one
* itself, and we cannot tell whether the code being emulated is such
* a case, so always insert one.
*/
smp_mb();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
cond_resched();
}
if (res == 0) {
/*
* Barrier also required between acquiring a lock for a
* protected resource and accessing the resource. Inserted for
* same reason as above.
*/
smp_mb();
if (type == TYPE_SWPB)
swpbcounter++;
else
swpcounter++;
}
return res;
}
/*
* swp_handler logs the id of the calling process, dissects the instruction,
* sanity-checks the memory location, calls emulate_swpX for the actual
* operation, and deals with fixup/error handling before returning.
*/
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
}
/*
* Only emulate SWP/SWPB executed in ARM state/User mode.
* The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
*/
static struct undef_hook swp_hook = {
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
.cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
.cpsr_val = USR_MODE,
.fn = swp_handler
};
/*
* Register handler and create status file in /proc/cpu
* Invoked as late_initcall, since not needed before init spawned.
*/
static int __init swp_emulation_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
if (!res)
return -ENOMEM;
res->read_proc = proc_read_status;
#endif /* CONFIG_PROC_FS */
printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);
return 0;
}
late_initcall(swp_emulation_init);
| /*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
*
* Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
* Where: Rt = destination
* Rt2 = source
* Rn = address
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
/*
* Error-checking SWP macros implemented using ldrex{b}/strex{b}
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 0b, 3b\n" \
" .long 1b, 3b\n" \
" .previous" \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "cc", "memory")
#define __user_swp_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "b")
/*
* Macros/defines for extracting register numbers from instruction.
*/
#define EXTRACT_REG_NUM(instruction, offset) \
(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET 16
#define RT_OFFSET 12
#define RT2_OFFSET 0
/*
* Bit 22 of the instruction encoding distinguishes between
* the SWP and SWPB variants (bit set means SWPB).
*/
#define TYPE_SWPB (1 << 22)
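/*
 * Worked example (editor's addition, not part of the original source):
 * the encoding 0xe1002091 is "SWP r2, r1, [r0]". With the macros above:
 *   EXTRACT_REG_NUM(0xe1002091, RN_OFFSET)  == 0  (Rn  = r0, address)
 *   EXTRACT_REG_NUM(0xe1002091, RT_OFFSET)  == 2  (Rt  = r2, destination)
 *   EXTRACT_REG_NUM(0xe1002091, RT2_OFFSET) == 1  (Rt2 = r1, source)
 *   0xe1002091 & TYPE_SWPB                  == 0  (word-sized SWP, not SWPB)
 * Setting bit 22 (0xe1402091) would select the byte variant, SWPB.
 */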
static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t previous_pid;
#ifdef CONFIG_PROC_FS
static int proc_read_status(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *p = page;
int len;
p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
if (previous_pid != 0)
p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
len = (p - page) - off;
if (len < 0)
len = 0;
*eof = (len <= count) ? 1 : 0;
*start = page + off;
return len;
}
#endif
/*
* Set up process info to signal segmentation fault - called on access error.
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) instruction_pointer(regs);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs, &info, 0, 0);
abtcounter++;
}
static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
unsigned int res = 0;
if ((type != TYPE_SWPB) && (address & 0x3)) {
/* SWP to unaligned address not permitted */
pr_debug("SWP instruction on unaligned pointer!\n");
return -EFAULT;
}
while (1) {
unsigned long temp;
/*
* A barrier is required between accessing a protected resource and
* releasing a lock for it. Legacy code might not have issued one
* itself, and we cannot tell whether the code being emulated is such
* a case, so always insert one.
*/
smp_mb();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
cond_resched();
}
if (res == 0) {
/*
* Barrier also required between acquiring a lock for a
* protected resource and accessing the resource. Inserted for
* same reason as above.
*/
smp_mb();
if (type == TYPE_SWPB)
swpbcounter++;
else
swpcounter++;
}
return res;
}
/*
* swp_handler logs the id of the calling process, dissects the instruction,
* sanity-checks the memory location, calls emulate_swpX for the actual
* operation, and deals with fixup/error handling before returning.
*/
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
}
/*
* Only emulate SWP/SWPB executed in ARM state/User mode.
* The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
*/
static struct undef_hook swp_hook = {
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
.cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
.cpsr_val = USR_MODE,
.fn = swp_handler
};
/*
* Register handler and create status file in /proc/cpu
* Invoked as late_initcall, since not needed before init spawned.
*/
static int __init swp_emulation_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
if (!res)
return -ENOMEM;
res->read_proc = proc_read_status;
#endif /* CONFIG_PROC_FS */
printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);
return 0;
}
late_initcall(swp_emulation_init);
|
228,527,954,452,509 | /*
* linux/arch/arm/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "fault.h"
/*
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}
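/*
 * Worked example (editor's addition, not part of the original source):
 * fsr_fs() rebuilds the 5-bit fault status from FS[3:0] (bits 3:0) and
 * FS[4] (bit 10). For fsr = 0x805 (write fault, FS = 0b00101) it returns 5,
 * "section translation fault" in fsr_info[] below; for fsr = 0x406
 * (FS = 0b10110) it returns 22, the xscale "imprecise external abort".
 */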
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
int ret = 0;
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, fsr))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
return 0;
}
#endif
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
*/
void show_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
if (!mm)
mm = &init_mm;
printk(KERN_ALERT "pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
printk(KERN_ALERT "[%08lx] *pgd=%08llx",
addr, (long long)pgd_val(*pgd));
do {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (pgd_none(*pgd))
break;
if (pgd_bad(*pgd)) {
printk("(bad)");
break;
}
pud = pud_offset(pgd, addr);
if (PTRS_PER_PUD != 1)
printk(", *pud=%08lx", pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
printk("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
printk("(bad)");
break;
}
/* We must not map this if we have highmem enabled */
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08llx", (long long)pte_val(*pte));
printk(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
pte_unmap(pte);
} while(0);
printk("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
/*
* Oops. The kernel tried to access some page that wasn't present.
*/
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
/*
* Are we prepared to handle this kernel fault?
*/
if (fixup_exception(regs))
return;
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT
"Unable to handle kernel %s at virtual address %08lx\n",
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
}
/*
* Something tried to access memory that isn't in our memory map..
* User mode accesses just cause a SIGSEGV
*/
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int fsr, unsigned int sig, int code,
struct pt_regs *regs)
{
struct siginfo si;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_SEGV) {
printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
tsk->comm, sig, addr, fsr);
show_pte(tsk->mm, addr);
show_regs(regs);
}
#endif
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
si.si_signo = sig;
si.si_errno = 0;
si.si_code = code;
si.si_addr = (void __user *)addr;
force_sig_info(sig, &si, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
else
__do_kernel_fault(mm, addr, fsr, regs);
}
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
/*
* Check that the permissions on the VMA allow for the fault which occurred.
* If we encountered a write fault, we must have write permission, otherwise
* we allow any permission.
*/
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
if (fsr & FSR_WRITE)
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
return vma->vm_flags & mask ? false : true;
}
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
if (unlikely(!vma))
goto out;
if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
* Ok, we have a good vm_area for this
* memory access, so we can handle it.
*/
good_area:
if (access_error(fsr, vma)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR))
return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
}
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
if (notify_page_fault(regs, fsr))
return 0;
tsk = current;
mm = tsk->mm;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case, we'll have missed the might_sleep() from
* down_read()
*/
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) &&
!search_exception_tables(regs->ARM_pc))
goto no_context;
#endif
}
fault = __do_page_fault(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
if (fault & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
else if (fault & VM_FAULT_MINOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to
* userspace (which will retry the fault, or kill us if we
* got oom-killed)
*/
pagefault_out_of_memory();
return 0;
}
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
* successfully fix up this page fault.
*/
sig = SIGBUS;
code = BUS_ADRERR;
} else {
/*
* Something tried to access memory that
* isn't in our memory map..
*/
sig = SIGSEGV;
code = fault == VM_FAULT_BADACCESS ?
SEGV_ACCERR : SEGV_MAPERR;
}
__do_user_fault(tsk, addr, fsr, sig, code, regs);
return 0;
no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
#else /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* First Level Translation Fault Handler
*
* We enter here because the first level page table doesn't contain
* a valid entry for the address.
*
* If the address is in kernel space (>= TASK_SIZE), then we are
* probably faulting in the vmalloc() area.
*
* If the init_task's first level page tables contain the relevant
* entry, we copy it to this task. If not, we send the process
* a signal, fixup the exception, or oops the kernel.
*
* NOTE! We MUST NOT take any locks for this case. We may be in an
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
unsigned int index;
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
if (user_mode(regs))
goto bad_area;
index = pgd_index(addr);
/*
* FIXME: CP15 C1 is write only on ARMv3 architectures.
*/
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
if (pgd_none(*pgd_k))
goto bad_area;
if (!pgd_present(*pgd))
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, addr);
pud_k = pud_offset(pgd_k, addr);
if (pud_none(*pud_k))
goto bad_area;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
/*
* On ARM one Linux PGD entry contains two hardware entries (see page
* tables layout in pgtable.h). We normally guarantee that we always
* fill both L1 entries. But create_mapping() doesn't follow the rule.
* It can create individual L1 entries, so here we have to do the
* pmd_none() check on the entry that really corresponds to the address,
* not on the first of the pair.
*/
index = (addr >> SECTION_SHIFT) & 1;
if (pmd_none(pmd_k[index]))
goto bad_area;
copy_pmd(pmd, pmd_k);
return 0;
bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
#else /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* Some section permission faults need to be handled gracefully.
* They can happen due to a __{get,put}_user during an oops.
*/
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
do_bad_area(addr, fsr, regs);
return 0;
}
/*
* This abort handler always returns "fault".
*/
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
static struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" }
};
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
BUG();
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].code = code;
fsr_info[nr].name = name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
arm_notify_die("", regs, &info, fsr, 0);
}
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
BUG();
ifsr_info[nr].fn = fn;
ifsr_info[nr].sig = sig;
ifsr_info[nr].code = code;
ifsr_info[nr].name = name;
}
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
struct siginfo info;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
arm_notify_die("", regs, &info, ifsr, 0);
}
static int __init exceptions_init(void)
{
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
"I-cache maintenance fault");
}
if (cpu_architecture() >= CPU_ARCH_ARMv7) {
/*
* TODO: Access flag faults introduced in ARMv6K.
* Runtime check for 'K' extension is needed
*/
hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
}
return 0;
}
arch_initcall(exceptions_init);
| /*
* linux/arch/arm/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
* Modifications for ARM processor (c) 1995-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "fault.h"
/*
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}
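/*
 * Worked example (editor's addition, not part of the original source):
 * fsr_fs() rebuilds the 5-bit fault status from FS[3:0] (bits 3:0) and
 * FS[4] (bit 10). For fsr = 0x805 (write fault, FS = 0b00101) it returns 5,
 * "section translation fault" in fsr_info[] below; for fsr = 0x406
 * (FS = 0b10110) it returns 22, the xscale "imprecise external abort".
 */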
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
int ret = 0;
if (!user_mode(regs)) {
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, fsr))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
return 0;
}
#endif
/*
* This is useful to dump out the page tables associated with
* 'addr' in mm 'mm'.
*/
void show_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
if (!mm)
mm = &init_mm;
printk(KERN_ALERT "pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
printk(KERN_ALERT "[%08lx] *pgd=%08llx",
addr, (long long)pgd_val(*pgd));
do {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (pgd_none(*pgd))
break;
if (pgd_bad(*pgd)) {
printk("(bad)");
break;
}
pud = pud_offset(pgd, addr);
if (PTRS_PER_PUD != 1)
printk(", *pud=%08lx", pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
printk("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
printk("(bad)");
break;
}
/* We must not map this if we have highmem enabled */
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08llx", (long long)pte_val(*pte));
printk(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
pte_unmap(pte);
} while(0);
printk("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
/*
* Oops. The kernel tried to access some page that wasn't present.
*/
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
/*
* Are we prepared to handle this kernel fault?
*/
if (fixup_exception(regs))
return;
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT
"Unable to handle kernel %s at virtual address %08lx\n",
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
do_exit(SIGKILL);
}
/*
* Something tried to access memory that isn't in our memory map..
* User mode accesses just cause a SIGSEGV
*/
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int fsr, unsigned int sig, int code,
struct pt_regs *regs)
{
struct siginfo si;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_SEGV) {
printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
tsk->comm, sig, addr, fsr);
show_pte(tsk->mm, addr);
show_regs(regs);
}
#endif
tsk->thread.address = addr;
tsk->thread.error_code = fsr;
tsk->thread.trap_no = 14;
si.si_signo = sig;
si.si_errno = 0;
si.si_code = code;
si.si_addr = (void __user *)addr;
force_sig_info(sig, &si, tsk);
}
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (user_mode(regs))
__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
else
__do_kernel_fault(mm, addr, fsr, regs);
}
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
/*
* Check that the permissions on the VMA allow for the fault which occurred.
* If we encountered a write fault, we must have write permission, otherwise
* we allow any permission.
*/
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
if (fsr & FSR_WRITE)
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
return vma->vm_flags & mask ? false : true;
}
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
vma = find_vma(mm, addr);
fault = VM_FAULT_BADMAP;
if (unlikely(!vma))
goto out;
if (unlikely(vma->vm_start > addr))
goto check_stack;
/*
* Ok, we have a good vm_area for this
* memory access, so we can handle it.
*/
good_area:
if (access_error(fsr, vma)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR))
return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
return fault;
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
goto good_area;
out:
return fault;
}
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
if (notify_page_fault(regs, fsr))
return 0;
tsk = current;
mm = tsk->mm;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case, we'll have missed the might_sleep() from
* down_read()
*/
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) &&
!search_exception_tables(regs->ARM_pc))
goto no_context;
#endif
}
fault = __do_page_fault(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (fault & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
else if (fault & VM_FAULT_MINOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
return 0;
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to
* userspace (which will retry the fault, or kill us if we
* got oom-killed)
*/
pagefault_out_of_memory();
return 0;
}
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
*/
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_SIGBUS) {
/*
* We had some memory, but were unable to
* successfully fix up this page fault.
*/
sig = SIGBUS;
code = BUS_ADRERR;
} else {
/*
* Something tried to access memory that
* isn't in our memory map..
*/
sig = SIGSEGV;
code = fault == VM_FAULT_BADACCESS ?
SEGV_ACCERR : SEGV_MAPERR;
}
__do_user_fault(tsk, addr, fsr, sig, code, regs);
return 0;
no_context:
__do_kernel_fault(mm, addr, fsr, regs);
return 0;
}
#else /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* First Level Translation Fault Handler
*
* We enter here because the first level page table doesn't contain
* a valid entry for the address.
*
* If the address is in kernel space (>= TASK_SIZE), then we are
* probably faulting in the vmalloc() area.
*
* If the init_task's first level page tables contain the relevant
* entry, we copy it to this task. If not, we send the process
* a signal, fixup the exception, or oops the kernel.
*
* NOTE! We MUST NOT take any locks for this case. We may be in an
* interrupt or a critical region, and should only copy the information
* from the master page table, nothing more.
*/
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
unsigned int index;
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
if (user_mode(regs))
goto bad_area;
index = pgd_index(addr);
/*
* FIXME: CP15 C1 is write only on ARMv3 architectures.
*/
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
if (pgd_none(*pgd_k))
goto bad_area;
if (!pgd_present(*pgd))
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, addr);
pud_k = pud_offset(pgd_k, addr);
if (pud_none(*pud_k))
goto bad_area;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
/*
* On ARM one Linux PGD entry contains two hardware entries (see page
* tables layout in pgtable.h). We normally guarantee that we always
* fill both L1 entries. But create_mapping() doesn't follow the rule.
* It can create individual L1 entries, so here we have to do the
* pmd_none() check on the entry that really corresponds to the address,
* not on the first of the pair.
*/
index = (addr >> SECTION_SHIFT) & 1;
if (pmd_none(pmd_k[index]))
goto bad_area;
copy_pmd(pmd, pmd_k);
return 0;
bad_area:
do_bad_area(addr, fsr, regs);
return 0;
}
#else /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_MMU */
/*
* Some section permission faults need to be handled gracefully.
* They can happen due to a __{get,put}_user during an oops.
*/
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
do_bad_area(addr, fsr, regs);
return 0;
}
/*
* This abort handler always returns "fault".
*/
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
return 1;
}
static struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" }
};
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
BUG();
fsr_info[nr].fn = fn;
fsr_info[nr].sig = sig;
fsr_info[nr].code = code;
fsr_info[nr].name = name;
}
/*
* Dispatch a data abort to the relevant handler.
*/
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
arm_notify_die("", regs, &info, fsr, 0);
}
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
{
if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
BUG();
ifsr_info[nr].fn = fn;
ifsr_info[nr].sig = sig;
ifsr_info[nr].code = code;
ifsr_info[nr].name = name;
}
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
struct siginfo info;
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
info.si_addr = (void __user *)addr;
arm_notify_die("", regs, &info, ifsr, 0);
}
static int __init exceptions_init(void)
{
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
"I-cache maintenance fault");
}
if (cpu_architecture() >= CPU_ARCH_ARMv7) {
/*
* TODO: Access flag faults introduced in ARMv6K.
* Runtime check for 'K' extension is needed
*/
hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
"section access flag fault");
}
return 0;
}
arch_initcall(exceptions_init);
|
4,278,211,567,940 | /*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD ((1ULL << 32) - 1)
#define VALID_COUNT 0x7fffffff
#define TOTAL_BITS 32
#define HIGHEST_BIT 31
#define MIPS_MAX_HWEVENTS 4
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[MIPS_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* The borrowed MSB for the performance counter. A MIPS performance
* counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
* counters) as a factor of determining whether a counter overflow
* should be signaled. So here we use a separate MSB for each
* counter to make things easy.
*/
unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
* MIPS CPUs vary in performance counters. They use this differently,
* and even may not use it.
*/
unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
unsigned int event_id;
/*
* MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates the indexes of the counters to be used are
* even numbers.
*/
unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
#else
#define T
#define V
#define P
#endif
};
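/*
 * Editor's note (illustrative, not part of the original source): cntr_mask
 * is a bitmap of usable counter indices. CNTR_EVEN (0x55555555) has bits
 * 0, 2, 4, ... set, i.e. only even-numbered counters may be used, while
 * CNTR_ODD (0xaaaaaaaa) allows the odd-numbered ones.
 */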
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
struct mips_pmu {
const char *name;
int irq;
irqreturn_t (*handle_irq)(int irq, void *dev);
int (*handle_shared_irq)(void);
void (*start)(void);
void (*stop)(void);
int (*alloc_counter)(struct cpu_hw_events *cpuc,
struct hw_perf_event *hwc);
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
void (*enable_event)(struct hw_perf_event *evt, int idx);
void (*disable_event)(int idx);
const struct mips_perf_event *(*map_raw_event)(u64 config);
const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct mips_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
unsigned int num_counters;
};
static const struct mips_pmu *mipspmu;
static int
mipspmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
u64 uleft;
unsigned long flags;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > (s64)MAX_PERIOD)
left = MAX_PERIOD;
local64_set(&hwc->prev_count, (u64)-left);
local_irq_save(flags);
uleft = (u64)(-left) & MAX_PERIOD;
uleft > VALID_COUNT ?
set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
local_irq_restore(flags);
perf_event_update_userpage(event);
return ret;
}
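/*
 * Worked example (editor's addition, not part of the original source): with
 * 32-bit counters MAX_PERIOD is 0xffffffff and VALID_COUNT is 0x7fffffff.
 * For left == 0x30000000, (-left) & MAX_PERIOD == 0xd0000000, which exceeds
 * VALID_COUNT, so the msbs bit for this counter is set and the hardware
 * counter is programmed with 0xd0000000 & VALID_COUNT == 0x50000000; the
 * borrowed bit 31 is folded back in by mipspmu_event_update() below.
 */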
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
int shift = 64 - TOTAL_BITS;
s64 prev_raw_count, new_raw_count;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
local_irq_save(flags);
/* Make the counter value be a "real" one. */
new_raw_count = mipspmu->read_counter(idx);
if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
new_raw_count &= VALID_COUNT;
clear_bit(idx, cpuc->msbs);
} else
new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
local_irq_restore(flags);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return;
}
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
mipspmu->enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipspmu->disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* To look for a free counter for this event. */
idx = mipspmu->alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipspmu->disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->start();
}
static void mipspmu_disable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->stop();
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
int err;
if (mipspmu->irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu->irq, mipspmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"mips_perf_pmu", NULL);
if (err) {
pr_warning("Unable to request IRQ%d for MIPS "
"performance counters!\n", mipspmu->irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
* We are sharing the irq number with the timer interrupt.
*/
save_perf_irq = perf_irq;
perf_irq = mipspmu->handle_shared_irq;
err = 0;
} else {
pr_warning("The platform hasn't properly defined its "
"interrupt controller.\n");
err = -ENOENT;
}
return err;
}
static void mipspmu_free_irq(void)
{
if (mipspmu->irq >= 0)
free_irq(mipspmu->irq, NULL);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
/*
* mipsxx/rm9000/loongson2 have different performance counters; each
* has its own low-level init routine.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu->num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}
static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!mipspmu || event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
return -ENOSPC;
}
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#else
return (pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#endif
}
static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
const struct mips_perf_event *pev;
pev = ((*mipspmu->general_event_map)[idx].event_id ==
UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
&(*mipspmu->general_event_map)[idx]);
return pev;
}
static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct mips_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*mipspmu->cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
return ERR_PTR(-EOPNOTSUPP);
return pev;
}
static int validate_event(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct hw_perf_event fake_hwc = event->hw;
/* Allow mixed event groups, so return 1 to pass validation. */
if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cpu_hw_events fake_cpuc;
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
return -ENOSPC;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
return -ENOSPC;
}
if (!validate_event(&fake_cpuc, event))
return -ENOSPC;
return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
mipspmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!mipspmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, 0, data, regs))
mipspmu->disable_event(idx);
}
#include "perf_event_mipsxx.c"
/* Callchain handling code. */
/*
* Leave the userspace callchain empty for now. Once we find a way to
* trace the user stack callchains, we will add that here.
*/
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
}
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
do {
perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}
| /*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD ((1ULL << 32) - 1)
#define VALID_COUNT 0x7fffffff
#define TOTAL_BITS 32
#define HIGHEST_BIT 31
#define MIPS_MAX_HWEVENTS 4
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[MIPS_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* The borrowed MSB for each performance counter. A MIPS performance
* counter uses bit 31 (for 32bit counters) or bit 63 (for 64bit
* counters) to decide whether a counter overflow should be signaled,
* so we keep a separate software MSB for each counter to make that
* bookkeeping easy.
*/
unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
* MIPS CPUs vary in their performance counters; they use this field
* differently, and some may not use it at all.
*/
unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
unsigned int event_id;
/*
* MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates the indexes of the counters to be used are
* even numbers.
*/
unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
#else
#define T
#define V
#define P
#endif
};
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
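/*
 * Per-CPU-model PMU description: one instance of this ops table is
 * chosen at init time by the CPU-specific code included below
 * (perf_event_mipsxx.c) and drives all counter access in this file.
 */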
struct mips_pmu {
const char *name;
int irq;
irqreturn_t (*handle_irq)(int irq, void *dev);
int (*handle_shared_irq)(void);
void (*start)(void);
void (*stop)(void);
int (*alloc_counter)(struct cpu_hw_events *cpuc,
struct hw_perf_event *hwc);
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
void (*enable_event)(struct hw_perf_event *evt, int idx);
void (*disable_event)(int idx);
const struct mips_perf_event *(*map_raw_event)(u64 config);
const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct mips_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
unsigned int num_counters;
};
static const struct mips_pmu *mipspmu;
static int
mipspmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
u64 uleft;
unsigned long flags;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > (s64)MAX_PERIOD)
left = MAX_PERIOD;
local64_set(&hwc->prev_count, (u64)-left);
local_irq_save(flags);
uleft = (u64)(-left) & MAX_PERIOD;
uleft > VALID_COUNT ?
set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
local_irq_restore(flags);
perf_event_update_userpage(event);
return ret;
}
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
int shift = 64 - TOTAL_BITS;
s64 prev_raw_count, new_raw_count;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
local_irq_save(flags);
/* Reconstruct the full counter value using the borrowed software MSB. */
new_raw_count = mipspmu->read_counter(idx);
if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
new_raw_count &= VALID_COUNT;
clear_bit(idx, cpuc->msbs);
} else
new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
local_irq_restore(flags);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return;
}
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
mipspmu->enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipspmu->disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* Look for a free counter for this event. */
idx = mipspmu->alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipspmu->disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->start();
}
static void mipspmu_disable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->stop();
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
int err;
if (mipspmu->irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu->irq, mipspmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"mips_perf_pmu", NULL);
if (err) {
pr_warning("Unable to request IRQ%d for MIPS "
"performance counters!\n", mipspmu->irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
* We are sharing the irq number with the timer interrupt.
*/
save_perf_irq = perf_irq;
perf_irq = mipspmu->handle_shared_irq;
err = 0;
} else {
pr_warning("The platform hasn't properly defined its "
"interrupt controller.\n");
err = -ENOENT;
}
return err;
}
static void mipspmu_free_irq(void)
{
if (mipspmu->irq >= 0)
free_irq(mipspmu->irq, NULL);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
/*
* mipsxx/rm9000/loongson2 have different performance counters; each
* has its own low-level init routine.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
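/*
 * atomic_dec_and_mutex_lock() only takes the mutex when the count
 * drops to zero, so the teardown below runs exactly once, after the
 * last active event goes away. This pairs with the refcounting in
 * mipspmu_event_init().
 */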
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu->num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}
static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!mipspmu || event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
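/*
 * Fast path: if other events are already active, just take another
 * reference. Otherwise serialize on the mutex so that only one
 * caller reserves the PMU interrupt via mipspmu_get_irq().
 */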
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
return -ENOSPC;
}
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#else
return (pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#endif
}
static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
const struct mips_perf_event *pev;
pev = ((*mipspmu->general_event_map)[idx].event_id ==
UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
&(*mipspmu->general_event_map)[idx]);
return pev;
}
static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct mips_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*mipspmu->cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
return ERR_PTR(-EOPNOTSUPP);
return pev;
}
static int validate_event(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct hw_perf_event fake_hwc = event->hw;
/* Allow mixed event groups, so return 1 to pass validation. */
if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cpu_hw_events fake_cpuc;
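/*
 * Dry-run the counter allocation against a scratch cpu_hw_events to
 * check that the leader, its siblings and the new event can all be
 * scheduled on the PMU at the same time.
 */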
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
return -ENOSPC;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
return -ENOSPC;
}
if (!validate_event(&fake_cpuc, event))
return -ENOSPC;
return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
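/*
 * Called from the PMU irq handler for each overflowed counter: fold
 * the hardware count into the event, program the next sample period
 * and let the perf core decide whether the counter must be disabled.
 */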
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
mipspmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!mipspmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, data, regs))
mipspmu->disable_event(idx);
}
#include "perf_event_mipsxx.c"
/* Callchain handling code. */
/*
* Leave the userspace callchain empty for now. Once we find a way to
* trace the user stack callchains, we will add that here.
*/
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
}
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
do {
perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}
|
37,057,244,860,504 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
printk(" (Bad stack address)");
break;
}
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
printk("\n");
}
#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
raw_show_trace = 1;
return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
}
printk("Call Trace:\n");
do {
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
printk("\n");
}
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
printk("\n ");
if (i > 39) {
printk(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
printk(" (Bad stack address)");
break;
}
printk(" %0*lx", field, stackdata);
i++;
}
printk("\n");
show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
regs.cp0_epc = 0;
} else {
if (task && task != current) {
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
} else if (atomic_read(&kgdb_active) != -1 &&
kdb_current_regs) {
memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
}
show_stacktrace(task, &regs);
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
struct pt_regs regs;
prepare_frametrace(&regs);
show_backtrace(current, &regs);
}
EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
long i;
unsigned short __user *pc16 = NULL;
printk("\nCode:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
int i;
printk("Cpu %d\n", smp_processor_id());
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
printk(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
printk(" %*s", field, "");
else
printk(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
printk("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
/*
* Saved cp0 registers
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
printk(" %s\n", print_tainted());
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
printk("IEo ");
if (regs->cp0_status & ST0_KUP)
printk("KUp ");
if (regs->cp0_status & ST0_IEP)
printk("IEp ");
if (regs->cp0_status & ST0_KUC)
printk("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
} else {
if (regs->cp0_status & ST0_KX)
printk("KX ");
if (regs->cp0_status & ST0_SX)
printk("SX ");
if (regs->cp0_status & ST0_UX)
printk("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
printk("USER ");
break;
case KSU_SUPERVISOR:
printk("SUPERVISOR ");
break;
case KSU_KERNEL:
printk("KERNEL ");
break;
default:
printk("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
printk("ERL ");
if (regs->cp0_status & ST0_EXL)
printk("EXL ");
if (regs->cp0_status & ST0_IE)
printk("IE ");
}
printk("\n");
printk("Cause : %08x\n", cause);
cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
cpu_name_string());
}
/*
* FIXME: really the generic show_regs should take a const pointer argument.
*/
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
current->comm, current->pid, current_thread_info(), current,
field, current_thread_info()->tp_value);
if (cpu_has_userlocal) {
unsigned long tls;
tls = read_c0_userlocal();
if (tls != current_thread_info()->tp_value)
printk("*HwTLS: %0*lx\n", field, tls);
}
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
}
static int regs_to_trapnr(struct pt_regs *regs)
{
return (regs->cp0_cause >> 2) & 0x1f;
}
static DEFINE_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
do_exit(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
/* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
if (fixup)
action = MIPS_BE_FIXUP;
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL);
switch (action) {
case MIPS_BE_DISCARD:
return;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
return;
}
break;
default:
break;
}
/*
* Assume it would be too dangerous to continue ...
*/
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
}
/*
* ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
/*
* The ll_bit is cleared by r*_switch.S
*/
unsigned int ll_bit;
struct task_struct *ll_task;
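/*
 * Emulation protocol: simulate_ll() records the current task as the
 * holder of the reservation (ll_bit/ll_task); simulate_sc() lets the
 * store succeed only if that reservation is still held by the same
 * task. A context switch clears ll_bit, conservatively failing the sc.
 */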
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
unsigned long value, __user *vaddr;
long offset;
/*
* analyse the ll instruction that just caused a ri exception
* and compute the address it references.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
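/* e.g. an offset field of 0xffff is -1 once sign-extended */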
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
if (get_user(value, vaddr))
return SIGSEGV;
preempt_disable();
if (ll_task == NULL || ll_task == current) {
ll_bit = 1;
} else {
ll_bit = 0;
}
ll_task = current;
preempt_enable();
regs->regs[(opcode & RT) >> 16] = value;
return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
unsigned long __user *vaddr;
unsigned long reg;
long offset;
/*
* analyse the sc instruction that just caused a ri exception
* and compute the address it references.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
return SIGBUS;
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
regs->regs[reg] = 0;
preempt_enable();
return 0;
}
preempt_enable();
if (put_user(regs->regs[reg], vaddr))
return SIGSEGV;
regs->regs[reg] = 1;
return 0;
}
/*
* ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
* opcodes are supposed to result in coprocessor unusable exceptions if
* executed on ll/sc-less processors. That's the theory. In practice a
* few processors such as NEC's VR4100 throw reserved instruction exceptions
* instead, so we're doing the emulation thing in both exception handlers.
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
return simulate_ll(regs, opcode);
}
if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
return simulate_sc(regs, opcode);
}
return -1; /* Must be something else ... */
}
/*
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
struct thread_info *ti = task_thread_info(current);
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
case 1: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
case 2: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
case 3: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case 29:
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
}
/* Not ours. */
return -1;
}
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
return 0;
}
return -1; /* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
} else {
si.si_code = BUS_ADRERR;
}
force_sig_info(sig, &si, current);
return 1;
} else if (sig) {
force_sig(sig, current);
return 1;
} else {
return 0;
}
}
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info = {0};
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
*
* Force FPU to dump state into task/thread context. We're
* moving a lot of data here for what is probably a single
* instruction, but the alternative is to pre-decode the FP
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
/* Ensure 'resume' does not overwrite the saved FP context. */
lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any of the
* cause bits set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
info.si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
info.si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
info.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
info.si_code = FPE_FLTRES;
else
info.si_code = __SI_FAULT;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
siginfo_t info;
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
/*
* A short test says that IRIX 5.3 sends SIGTRAP for all trap
* insns, even for trap and break codes that indicate arithmetic
* failures. Weird ...
* But should we continue the brokenness??? --macro
*/
switch (code) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
if (code == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
force_sig(SIGTRAP, current);
break;
case BRK_MEMU:
/*
* Address errors may be deliberately induced by the FPU
* emulator to retake control of the CPU after executing the
* instruction in the delay slot of an emulated branch.
*
* Terminate if exception was recognized as a delay slot return
* otherwise handle as normal.
*/
if (do_dsemulret(regs))
return;
die_if_kernel("Math emu break/trap", regs);
force_sig(SIGTRAP, current);
break;
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
* There is an ancient bug in MIPS assemblers: the break
* code starts at bit 16 instead of bit 6 in the opcode.
* Gas is bug-compatible, but not always, grrr...
* We handle both cases with a simple heuristic. --macro
*/
bcode = ((opcode >> 6) & ((1 << 20) - 1));
if (bcode >= (1 << 10))
bcode >>= 10;
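/*
 * e.g. "break 7" assembled with the buggy encoding yields a code
 * field of 7 << 10; the shift above maps both encodings to 7.
 */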
/*
* notify the kprobe handlers, if instruction is likely to
* pertain to them.
*/
switch (bcode) {
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
default:
break;
}
do_trap_or_bp(regs, bcode, "Break");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
{
unsigned int opcode, tcode = 0;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = ((opcode >> 6) & ((1 << 10) - 1));
do_trap_or_bp(regs, tcode, "Trap");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
unsigned int opcode = 0;
int status = -1;
if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
}
/*
* MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
* emulated more than some threshold number of instructions, force migration to
* a "CPU" that has FP support.
*/
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
/*
* If there's no FPU present, or if the application has already
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
int __ref register_cu2_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cu2_chain, nb);
}
int cu2_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&cu2_chain, val, v);
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
struct pt_regs *regs = data;
switch (action) {
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
"instruction", regs);
/* Fall through */
case CU2_EXCEPTION:
force_sig(SIGILL, current);
}
return NOTIFY_OK;
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
unsigned long old_epc;
unsigned int opcode;
unsigned int cpid;
int status;
unsigned long __maybe_unused flags;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
return;
case 1:
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
init_fpu();
set_used_math();
}
if (!raw_cpu_has_fpu) {
int sig;
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu,
0, &fault_addr);
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}
return;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
return;
case 3:
break;
}
force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
force_sig(SIGILL, current);
}
/*
* Called with interrupts disabled.
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
u32 cause;
/*
* Clear the WP bit (bit 22) of the cause register so we don't loop
* forever.
*/
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);
/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set, clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig(SIGTRAP, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
}
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
show_regs(regs);
if (multi_match) {
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
printk("\n");
dump_tlb_all();
}
show_code((unsigned int __user *) regs->cp0_epc);
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT;
switch (subcode) {
case 0:
printk(KERN_DEBUG "Thread Underflow\n");
break;
case 1:
printk(KERN_DEBUG "Thread Overflow\n");
break;
case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
break;
case 3:
printk(KERN_DEBUG "Gating Storage Exception\n");
break;
case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode);
break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
panic("Unexpected DSP exception\n");
force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or by another deadly
* hardware/software error.
*/
show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
(regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
* Some MIPS CPUs can enable/disable cache parity detection, but they
* do it in different ways.
*/
static inline void parity_protection_init(void)
{
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
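/*
 * Probe by setting each enable bit and reading it back: a bit that
 * sticks means the CPU implements that kind of parity protection.
 */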
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
/* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for "
"MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE);
break;
default:
break;
}
}
asmlinkage void cache_parity_error(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
printk("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
if (reg_val & (1<<23))
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif
panic("Can't handle the cache error!");
}
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
*/
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
if (debug & 0x80000000) {
/*
* In branch delay slot.
* We cheat a little bit here and use EPC to calculate the
* debug return address (DEPC). EPC is restored after the
* calculation.
*/
old_epc = regs->cp0_epc;
regs->cp0_epc = depc;
__compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
} else
depc += 4;
write_c0_depc(depc);
#if 0
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
write_c0_debug(debug | 0x100);
#endif
}
/*
* NMI exception handler.
*/
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
bust_spinlocks(1);
printk("NMI taken!!!!\n");
die("NMI", regs);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
unsigned long old_handler = exception_handlers[n];
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
unsigned long jump_mask = ~((1 << 28) - 1);
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
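/*
 * A j instruction can only reach targets inside the current 256MB
 * segment; if the handler lies outside the segment containing
 * ebase + 0x200, load its full address into k0 and use jr instead.
 */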
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
static asmlinkage void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
u32 *w;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= srssets)
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (srssets > 1)
change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
char *vec_start = (cpu_wait == r4k_wait) ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
const int handler_len = &except_vec_vi_end - vec_start;
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic("VECTORSPACING too small");
}
memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index exceeds SMTC maximum. */
w = (u32 *)(b + mori_offset);
*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
w = (u32 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler
*
* It is the handler's responsibility to save registers if required
* (e.g. hi/lo) and return from the exception using "eret".
*/
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
*w = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}
return (void *)old_handler;
}
void *set_vi_handler(int n, vi_handler_t addr)
{
return set_vi_srs_handler(n, addr, 0);
}
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
/*
* Timer interrupt
*/
int cp0_compare_irq;
int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
*/
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
static int __cpuinitdata noulri;
static int __init ulri_disable(char *s)
{
pr_info("Disabling ulri\n");
noulri = 1;
return 1;
}
__setup("noulri", ulri_disable);
void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for the first TC of each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
hwrena |= (1 << 29);
if (hwrena)
write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = cp0_compare_irq;
cp0_perfcount_irq = -1;
}
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* First TC in non-boot VPE must do subset of tlb_init()
* for MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
if (!addr)
panic(panic_null_cerr);
memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
rdhwr_noopt = 1;
return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;
check_wait();
rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
ebase = CKSEG0;
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
per_cpu_trap_init();
/*
* Copy the generic exception handlers to their final destination.
* This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Copy the EJTAG debug exception vector handler code to its final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(23, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable cache parity detection, but do
* it in different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades);
set_except_vector(6, handle_ibe);
set_except_vector(7, handle_dbe);
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
//set_except_vector(14, handle_mc);
//set_except_vector(15, handle_ndc);
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
set_except_vector(26, handle_dsp);
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
| /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
printk(" (Bad stack address)");
break;
}
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
printk("\n");
}
#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
raw_show_trace = 1;
return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
}
printk("Call Trace:\n");
do {
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
printk("\n");
}
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
printk("\n ");
if (i > 39) {
printk(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
printk(" (Bad stack address)");
break;
}
printk(" %0*lx", field, stackdata);
i++;
}
printk("\n");
show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
regs.cp0_epc = 0;
} else {
if (task && task != current) {
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
} else if (atomic_read(&kgdb_active) != -1 &&
kdb_current_regs) {
memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
}
show_stacktrace(task, &regs);
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
struct pt_regs regs;
prepare_frametrace(&regs);
show_backtrace(current, &regs);
}
EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
long i;
unsigned short __user *pc16 = NULL;
printk("\nCode:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
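/*
 * The low bit of the EPC flags MIPS16 mode; in that case instructions
 * are 16 bits wide, which is why the dump above drops to halfword
 * reads and prints 4-digit items instead of full 32-bit 8-digit words.
 */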
static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
int i;
printk("Cpu %d\n", smp_processor_id());
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
printk(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
printk(" %*s", field, "");
else
printk(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
printk("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
/*
* Saved cp0 registers
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
printk(" %s\n", print_tainted());
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
printk("IEo ");
if (regs->cp0_status & ST0_KUP)
printk("KUp ");
if (regs->cp0_status & ST0_IEP)
printk("IEp ");
if (regs->cp0_status & ST0_KUC)
printk("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
} else {
if (regs->cp0_status & ST0_KX)
printk("KX ");
if (regs->cp0_status & ST0_SX)
printk("SX ");
if (regs->cp0_status & ST0_UX)
printk("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
printk("USER ");
break;
case KSU_SUPERVISOR:
printk("SUPERVISOR ");
break;
case KSU_KERNEL:
printk("KERNEL ");
break;
default:
printk("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
printk("ERL ");
if (regs->cp0_status & ST0_EXL)
printk("EXL ");
if (regs->cp0_status & ST0_IE)
printk("IE ");
}
printk("\n");
printk("Cause : %08x\n", cause);
cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
cpu_name_string());
}
/*
* FIXME: really the generic show_regs should take a const pointer argument.
*/
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
current->comm, current->pid, current_thread_info(), current,
field, current_thread_info()->tp_value);
if (cpu_has_userlocal) {
unsigned long tls;
tls = read_c0_userlocal();
if (tls != current_thread_info()->tp_value)
printk("*HwTLS: %0*lx\n", field, tls);
}
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
}
static int regs_to_trapnr(struct pt_regs *regs)
{
return (regs->cp0_cause >> 2) & 0x1f;
}
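/*
 * For reference, ExcCode lives in bits 6..2 of the CP0 Cause register,
 * so (cause >> 2) & 0x1f recovers the hardware exception code, e.g.
 * cp0_cause == 0x24 gives (0x24 >> 2) & 0x1f == 9, the breakpoint
 * exception. That value is what the notify_die() calls throughout
 * this file receive as their trap number.
 */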
static DEFINE_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
do_exit(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
/* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
if (fixup)
action = MIPS_BE_FIXUP;
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL);
switch (action) {
case MIPS_BE_DISCARD:
return;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
return;
}
break;
default:
break;
}
/*
* Assume it would be too dangerous to continue ...
*/
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
}
/*
* ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
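/*
 * These masks carve an I-type word into op(31:26), base(25:21),
 * rt(20:16) and offset(15:0). As a worked (hypothetical) example,
 * "ll $8, 16($29)" encodes as
 *
 *   0xc0000000 | (29 << 21) | (8 << 16) | 16  ==  0xc3a80010
 *
 * which is exactly what simulate_ll() below takes apart again.
 */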
/*
* The ll_bit is cleared by r*_switch.S
*/
unsigned int ll_bit;
struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
unsigned long value, __user *vaddr;
long offset;
/*
* analyse the ll instruction that just caused a ri exception
* and compute the referenced address into vaddr.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
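/*
 * The shift pair sign-extends the 16-bit immediate: the left shift
 * parks bit 15 of the field in the sign bit of a 32-bit long, and
 * the arithmetic right shift replicates it back down. (With a
 * 64-bit long the same trick would need <<48/>>48 or a cast
 * through short.)
 */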
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
if (get_user(value, vaddr))
return SIGSEGV;
preempt_disable();
if (ll_task == NULL || ll_task == current) {
ll_bit = 1;
} else {
ll_bit = 0;
}
ll_task = current;
preempt_enable();
regs->regs[(opcode & RT) >> 16] = value;
return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
unsigned long __user *vaddr;
unsigned long reg;
long offset;
/*
* analyse the sc instruction that just caused a ri exception
* and compute the referenced address into vaddr.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
return SIGBUS;
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
regs->regs[reg] = 0;
preempt_enable();
return 0;
}
preempt_enable();
if (put_user(regs->regs[reg], vaddr))
return SIGSEGV;
regs->regs[reg] = 1;
return 0;
}
/*
* ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
* opcodes are supposed to result in coprocessor unusable exceptions if
* executed on ll/sc-less processors. That's the theory. In practice a
* few processors such as NEC's VR4100 throw reserved instruction exceptions
* instead, so we're doing the emulation thing in both exception handlers.
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_ll(regs, opcode);
}
if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_sc(regs, opcode);
}
return -1; /* Must be something else ... */
}
/*
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
struct thread_info *ti = task_thread_info(current);
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
case 1: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
case 2: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
case 3: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case 29:
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
}
/* Not ours. */
return -1;
}
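/*
 * A sketch of the userland side this emulates: glibc fetches the TLS
 * pointer with
 *
 *   rdhwr $3, $29
 *
 * On cores without the UserLocal hardware register that traps as a
 * reserved instruction, and case 29 above hands back ti->tp_value
 * instead.
 */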
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return 0;
}
return -1; /* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
} else {
si.si_code = BUS_ADRERR;
}
force_sig_info(sig, &si, current);
return 1;
} else if (sig) {
force_sig(sig, current);
return 1;
} else {
return 0;
}
}
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info = {0};
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
*
* Force FPU to dump state into task/thread context. We're
* moving a lot of data here for what is probably a single
* instruction, but the alternative is to pre-decode the FP
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
/* Ensure 'resume' not overwrite saved fp context again. */
lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
info.si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
info.si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
info.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
info.si_code = FPE_FLTRES;
else
info.si_code = __SI_FAULT;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
siginfo_t info;
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
/*
* A short test says that IRIX 5.3 sends SIGTRAP for all trap
* insns, even for trap and break codes that indicate arithmetic
* failures. Weird ...
* But should we continue the brokenness??? --macro
*/
switch (code) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
if (code == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
force_sig(SIGTRAP, current);
break;
case BRK_MEMU:
/*
* Address errors may be deliberately induced by the FPU
* emulator to retake control of the CPU after executing the
* instruction in the delay slot of an emulated branch.
*
* Terminate if exception was recognized as a delay slot return
* otherwise handle as normal.
*/
if (do_dsemulret(regs))
return;
die_if_kernel("Math emu break/trap", regs);
force_sig(SIGTRAP, current);
break;
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
* There is an ancient bug in some MIPS assemblers: the break
* code is placed starting at bit 16 instead of bit 6 of the opcode.
* Gas is bug-compatible, but not always, grrr...
* We handle both cases with a simple heuristic. --macro
*/
bcode = ((opcode >> 6) & ((1 << 20) - 1));
if (bcode >= (1 << 10))
bcode >>= 10;
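/*
 * Worked example of the heuristic: for "break 7" a bug-compatible
 * assembler places the code in bits 25:16, so (opcode >> 6) yields
 * 7 << 10 == 0x1c00; being >= 1 << 10, it is shifted back down to 7.
 * A correct assembler puts 7 straight into bits 15:6, which survives
 * the first mask unchanged. Only genuine break codes >= 1024 from a
 * correct assembler can be misread.
 */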
/*
* notify the kprobe handlers, if instruction is likely to
* pertain to them.
*/
switch (bcode) {
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
default:
break;
}
do_trap_or_bp(regs, bcode, "Break");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
{
unsigned int opcode, tcode = 0;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = ((opcode >> 6) & ((1 << 10) - 1));
do_trap_or_bp(regs, tcode, "Trap");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
unsigned int opcode = 0;
int status = -1;
if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
}
/*
* MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
* emulated more than some threshold number of instructions, force migration to
* a "CPU" that has FP support.
*/
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
/*
* If there's no FPU present, or if the application has already
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
int __ref register_cu2_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cu2_chain, nb);
}
int cu2_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&cu2_chain, val, v);
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
struct pt_regs *regs = data;
switch (action) {
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
"instruction", regs);
/* Fall through */
case CU2_EXCEPTION:
force_sig(SIGILL, current);
}
return NOTIFY_OK;
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
unsigned long old_epc;
unsigned int opcode;
unsigned int cpid;
int status;
unsigned long __maybe_unused flags;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
return;
case 1:
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
init_fpu();
set_used_math();
}
if (!raw_cpu_has_fpu) {
int sig;
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu,
0, &fault_addr);
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}
return;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
return;
case 3:
break;
}
force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
force_sig(SIGILL, current);
}
/*
* Called with interrupts disabled.
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
u32 cause;
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);
/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set; clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig(SIGTRAP, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
}
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
show_regs(regs);
if (multi_match) {
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
printk("\n");
dump_tlb_all();
}
show_code((unsigned int __user *) regs->cp0_epc);
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT;
switch (subcode) {
case 0:
printk(KERN_DEBUG "Thread Underflow\n");
break;
case 1:
printk(KERN_DEBUG "Thread Overflow\n");
break;
case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
break;
case 3:
printk(KERN_DEBUG "Gating Storage Exception\n");
break;
case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode);
break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
panic("Unexpected DSP exception\n");
force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hardware/software error.
*/
show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
(regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
* Some MIPS CPUs can enable/disable cache parity detection, but do
* it in different ways.
*/
static inline void parity_protection_init(void)
{
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
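/*
 * Classic write-then-read-back probing: ErrCtl bits the core does not
 * implement read back as zero, so setting ERRCTL_PE/ERRCTL_L2P and
 * re-reading tells us which parity controls exist. The
 * back_to_back_c0_hazard() barriers make sure each CP0 write has
 * taken effect before the dependent read.
 */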
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
/* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for "
"MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE);
break;
default:
break;
}
}
asmlinkage void cache_parity_error(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
printk("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
if (reg_val & (1<<23))
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif
panic("Can't handle the cache error!");
}
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
*/
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
if (debug & 0x80000000) {
/*
* In branch delay slot.
* We cheat a little bit here and use EPC to calculate the
* debug return address (DEPC). EPC is restored after the
* calculation.
*/
old_epc = regs->cp0_epc;
regs->cp0_epc = depc;
__compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
} else
depc += 4;
write_c0_depc(depc);
#if 0
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
write_c0_debug(debug | 0x100);
#endif
}
/*
* NMI exception handler.
*/
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
bust_spinlocks(1);
printk("NMI taken!!!!\n");
die("NMI", regs);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
unsigned long old_handler = exception_handlers[n];
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
unsigned long jump_mask = ~((1 << 28) - 1);
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
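/*
 * Why the jump_mask check above: a MIPS "j" carries only a 26-bit
 * word index and inherits the top four address bits from the PC, so
 * it can only reach targets in the same 256MB segment as ebase +
 * 0x200. Handlers outside that segment get the longer
 * "la k0, handler; jr k0; nop" sequence instead.
 */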
static asmlinkage void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
u32 *w;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= srssets)
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (srssets > 1)
change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
char *vec_start = (cpu_wait == r4k_wait) ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
const int handler_len = &except_vec_vi_end - vec_start;
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic("VECTORSPACING too small");
}
memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index exceeds SMTC maximum. */
w = (u32 *)(b + mori_offset);
*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
w = (u32 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler
*
* It is the handler's responsibility to save registers if required
* (e.g. hi/lo) and return from the exception using "eret"
*/
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
*w = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}
return (void *)old_handler;
}
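/*
 * Patching sketch for the srs == 0 path above: the prebuilt stub
 * carries a lui/ori placeholder pair at lui_offset/ori_offset, and
 * the two masked stores splice the handler address into their low
 * halfwords. For a hypothetical handler at 0x803e1f40 the patched
 * pair becomes "lui rN, 0x803e; ori rN, 0x1f40" (rN being whatever
 * register the stub uses), i.e. the 32-bit address split into its
 * upper and lower halves.
 */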
void *set_vi_handler(int n, vi_handler_t addr)
{
return set_vi_srs_handler(n, addr, 0);
}
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
/*
* Timer interrupt
*/
int cp0_compare_irq;
int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
*/
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
static int __cpuinitdata noulri;
static int __init ulri_disable(char *s)
{
pr_info("Disabling ulri\n");
noulri = 1;
return 1;
}
__setup("noulri", ulri_disable);
void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for the first TC of each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
hwrena |= (1 << 29);
if (hwrena)
write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = cp0_compare_irq;
cp0_perfcount_irq = -1;
}
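/*
 * Example of the R2 decode above: with IntCtl == 0xe0000000, IPTI
 * (bits 31:29) reads back as 7, so cp0_compare_irq = 7; if IPPCI
 * names the same line, cp0_perfcount_irq falls back to -1, i.e.
 * shared with the timer.
 */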
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* First TC in non-boot VPE must do subset of tlb_init()
* for MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
if (!addr)
panic(panic_null_cerr);
memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
rdhwr_noopt = 1;
return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;
check_wait();
rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
ebase = CKSEG0;
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
per_cpu_trap_init();
/*
* Copy the generic exception handlers to their final destination.
* This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Copy the EJTAG debug exception vector handler code to its final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(23, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable cache parity detection, but do
* it in different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades);
set_except_vector(6, handle_ibe);
set_except_vector(7, handle_dbe);
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
//set_except_vector(14, handle_mc);
//set_except_vector(15, handle_ndc);
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
set_except_vector(26, handle_dsp);
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
|
155,088,209,235,273 | /*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*
* This file contains the exception handler for the address error exception,
* with the special capability to execute faulting instructions in software.
* The handler does not try to handle the case when the program counter
* points to an address not aligned to a word boundary.
*
* Putting data at unaligned addresses is bad practice even on Intel, where
* only performance suffers. Much worse is that such code is non-portable.
* Because several programs die on MIPS due to alignment problems, I decided
* to implement this handler anyway, though I originally didn't intend to do
* this at all for user code.
*
* For now I enable fixing of address errors by default to make life easier.
* However, I intend to disable this at some point in the future, once the
* alignment problems in user programs have been fixed. For programmers this
* is the right way to go.
*
* Fixing address errors is a per process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
* An argument x of 0 disables software emulation; any other value enables it.
*
* Below is a little program to play around with this feature.
*
* #include <stdio.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08lx\n", *p);
*
* *p = 0xdeadface;
*
* for(i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define STR(x) __STR(x)
#define __STR(x) #x
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
/*
* This load never faults.
*/
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
* These are instructions that a compiler doesn't generate. We
* can therefore assume that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of interest.
*/
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlb\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlb\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (
".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlbu\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlbu\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"dsll\t%0, %0, 32\n\t"
"dsrl\t%0, %0, 32\n\t"
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tldl\t%0, (%2)\n"
"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tldl\t%0, 7(%2)\n"
"2:\tldr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 1(%2)\n\t"
"srl\t$1, %1, 0x8\n"
"2:\tsb\t$1, 0(%2)\n\t"
".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 0(%2)\n\t"
"srl\t$1,%1, 0x8\n"
"2:\tsb\t$1, 1(%2)\n\t"
".set\tat\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tswl\t%1,(%2)\n"
"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tswl\t%1, 3(%2)\n"
"2:\tswr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tsdl\t%1,(%2)\n"
"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tsdl\t%1, 7(%2)\n"
"2:\tsdr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
/*
* I herewith declare: this does not happen. So send SIGBUS.
*/
goto sigbus;
/*
* COP2 is available to the implementor for application-specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
default:
/*
* Pheeee... We encountered a yet-unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
*/
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
else if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so very ugly ...
*/
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS, current);
/*
* XXX On return from the signal handler we should advance the epc
*/
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
#endif
| /*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*
* This file contains the exception handler for address error exceptions, with the
* special capability to execute faulting instructions in software. The
* handler does not try to handle the case when the program counter points
* to an address not aligned to a word boundary.
*
* Putting data at unaligned addresses is bad practice even on Intel, where
* only performance is affected. Much worse is that such code is non-
* portable. Because several programs die on MIPS due to alignment
* problems, I decided to implement this handler anyway, though I originally
* didn't intend to do this at all for user code.
*
* For now I enable fixing of address errors by default, to make life easier.
* I intend, however, to disable this sometime in the future, once the alignment
* problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per-process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs (I strongly discourage the use of the
* software emulation), use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
* An argument x of 0 disables the software emulation; any other value enables it.
*
* Below is a little program to play around with this feature.
*
* #include <stdio.h>
* #include <stdlib.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* int main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08x\n", *p);
*
* *p = 0xdeadface;
*
* for (i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
*
* return 0;
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define STR(x) __STR(x)
#define __STR(x) #x
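/*
 * Two-level stringification: STR() expands its argument first, so
 * STR(PTR) emits the expansion of the PTR macro (a word-sized data
 * directive) rather than the literal text "PTR" in the __ex_table
 * entries below.
 */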
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
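/*
 * unaligned_action selects what do_ade() does with an unaligned
 * access: QUIET fixes it up silently, SIGNAL forwards a SIGBUS
 * instead of emulating, SHOW dumps the registers and then fixes it
 * up. With CONFIG_DEBUG_FS the value is writable at runtime via
 * debugfs (see debugfs_unaligned() below); otherwise it is fixed
 * at QUIET.
 */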
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/*
* This load never faults.
*/
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
* These are instructions that a compiler doesn't generate. We
* can assume therefore that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of interest.
*/
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
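/*
 * There is no unaligned halfword load, so the value is assembled
 * from two byte loads: a sign-extending lb for the high byte and an
 * lbu for the low byte, merged with sll/or. The $1 (at) register is
 * used as scratch, hence the .set noat/.set at bracketing.
 */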
__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlb\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlb\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (
".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlbu\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlbu\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here, the dsll/dsrl
* pair would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"dsll\t%0, %0, 32\n\t"
"dsrl\t%0, %0, 32\n\t"
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here, the ldl/ldr
* pair would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tldl\t%0, (%2)\n"
"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tldl\t%0, 7(%2)\n"
"2:\tldr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 1(%2)\n\t"
"srl\t$1, %1, 0x8\n"
"2:\tsb\t$1, 0(%2)\n\t"
".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 0(%2)\n\t"
"srl\t$1,%1, 0x8\n"
"2:\tsb\t$1, 1(%2)\n\t"
".set\tat\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tswl\t%1,(%2)\n"
"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tswl\t%1, 3(%2)\n"
"2:\tswr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here, the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tsdl\t%1,(%2)\n"
"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tsdl\t%1, 7(%2)\n"
"2:\tsdr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
/*
* I herewith declare: this does not happen. So send SIGBUS.
*/
goto sigbus;
/*
* COP2 is available to the implementor for application-specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
default:
/*
* Pheeee... We encountered a yet-unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
*/
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
else if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so very ugly ...
*/
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS, current);
/*
* XXX On return from the signal handler we should advance the epc
*/
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
#endif
|
182,136,388,436,586 | /*
* cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator
*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* A complete emulator for MIPS coprocessor 1 instructions. This is
* required for #float(switch) or #float(trap), where it catches all
* COP1 instructions via the "CoProcessor Unusable" exception.
*
* More surprisingly, it is also required for #float(ieee), to help out
* the hardware fpu at the boundaries of the IEEE-754 representation
* (denormalised values, infinities, underflow, etc). It is made
* quite nasty because emulation of some non-COP1 instructions is
* required, e.g. in branch delay slots.
*
* Note that if you know you won't have an fpu, then you'll get much
* better performance by compiling with -msoft-float!
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/inst.h>
#include <asm/bootinfo.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/mipsregs.h>
#include <asm/fpu_emulator.h>
#include <asm/uaccess.h>
#include <asm/branch.h>
#include "ieee754.h"
/* Strap kernel emulator for full MIPS IV emulation */
#ifdef __mips
#undef __mips
#endif
#define __mips 4
/* Function which emulates a floating point instruction. */
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
mips_instruction);
#if __mips >= 4 && __mips != 32
static int fpux_emu(struct pt_regs *,
struct mips_fpu_struct *, mips_instruction, void *__user *);
#endif
/* Further private data for which no space exists in mips_fpu_struct */
#ifdef CONFIG_DEBUG_FS
DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#endif
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
/* Determine rounding mode from the RM bits of the FCSR */
#define modeindex(v) ((v) & FPU_CSR_RM)
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
[FPU_CSR_RZ] = IEEE754_RZ,
[FPU_CSR_RU] = IEEE754_RU,
[FPU_CSR_RD] = IEEE754_RD,
};
/* Convert IEEE library modes to Mips rounding mode (0..3). */
static const unsigned char mips_rm[4] = {
[IEEE754_RN] = FPU_CSR_RN,
[IEEE754_RZ] = FPU_CSR_RZ,
[IEEE754_RD] = FPU_CSR_RD,
[IEEE754_RU] = FPU_CSR_RU,
};
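/*
 * The FCSR RM field encodes nearest/zero/up/down as 0..3 in MIPS
 * order, while the ieee754 library uses its own ordering, so the two
 * tables above translate in both directions. The emulator runs with
 * library encodings internally and converts on entry and exit (see
 * fpu_emulator_cop1Handler).
 */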
#if __mips >= 4
/* convert condition code register number to csr bit */
static const unsigned int fpucondbit[8] = {
FPU_CSR_COND0,
FPU_CSR_COND1,
FPU_CSR_COND2,
FPU_CSR_COND3,
FPU_CSR_COND4,
FPU_CSR_COND5,
FPU_CSR_COND6,
FPU_CSR_COND7
};
#endif
/*
* Redundant with logic already in kernel/branch.c,
* embedded in compute_return_epc. At some point,
* a single subroutine should be used across both
* modules.
*/
static int isBranchInstr(mips_instruction * i)
{
switch (MIPSInst_OPCODE(*i)) {
case spec_op:
switch (MIPSInst_FUNC(*i)) {
case jalr_op:
case jr_op:
return 1;
}
break;
case bcond_op:
switch (MIPSInst_RT(*i)) {
case bltz_op:
case bgez_op:
case bltzl_op:
case bgezl_op:
case bltzal_op:
case bgezal_op:
case bltzall_op:
case bgezall_op:
return 1;
}
break;
case j_op:
case jal_op:
case jalx_op:
case beq_op:
case bne_op:
case blez_op:
case bgtz_op:
case beql_op:
case bnel_op:
case blezl_op:
case bgtzl_op:
return 1;
case cop0_op:
case cop1_op:
case cop2_op:
case cop1x_op:
if (MIPSInst_RS(*i) == bc_op)
return 1;
break;
}
return 0;
}
/*
* In the Linux kernel, we support selection of FPR format on the
* basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS
* as a proxy for the FR bit so that a 64-bit FPU is emulated. In any
* case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
* even FPRs are used (Status.FR = 0).
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
if (cpu_has_fpu)
return xcp->cp0_status & ST0_FR;
#ifdef CONFIG_64BIT
return !test_thread_flag(TIF_32BIT_REGS);
#else
return 0;
#endif
}
#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
(int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
cop1_64bit(xcp) || !(x & 1) ? \
ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x) SITOREG((sp).bits, x)
#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x)
#define DPTOREG(dp, x) DITOREG((dp).bits, x)
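/*
 * FPR access helpers. With a 32-bit FPU (cop1_64bit() == 0) the 32
 * single-precision registers are folded into 16 even/odd pairs: an
 * odd register number selects the high 32 bits of the even pair.
 * The index expression x & ~(cop1_64bit(xcp) == 0) clears the low
 * bit of the index in 32-bit mode, so doubleword accesses always hit
 * the even register of the pair.
 */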
/*
* Emulate the single floating point instruction pointed at by EPC.
* Two instructions are emulated if the faulting instruction is in a
* branch delay slot.
*/
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
void *__user *fault_addr)
{
mips_instruction ir;
unsigned long emulpc, contpc;
unsigned int cond;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
/* XXX NEC Vr54xx bug workaround */
if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
xcp->cp0_cause &= ~CAUSEF_BD;
if (xcp->cp0_cause & CAUSEF_BD) {
/*
* The instruction to be emulated is in a branch delay slot
* which means that we have to emulate the branch instruction
* BEFORE we do the cop1 instruction.
*
* This branch could be a COP1 branch, but in that case we
* would have had a trap for that instruction, and would not
* come through this route.
*
* Linux MIPS branch emulator operates on context, updating the
* cp0_epc.
*/
emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */
if (__compute_return_epc(xcp)) {
#ifdef CP1DBG
printk("failed to emulate branch at %p\n",
(void *) (xcp->cp0_epc));
#endif
return SIGILL;
}
if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) emulpc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGSEGV;
}
/* __compute_return_epc() will have updated cp0_epc */
contpc = xcp->cp0_epc;
/* In order not to confuse ptrace() et al, tweak context */
xcp->cp0_epc = emulpc - 4;
} else {
emulpc = xcp->cp0_epc;
contpc = xcp->cp0_epc + 4;
}
emul:
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_RT(ir));
break;
}
case sdc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case lwc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_RT(ir));
break;
}
case swc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case cop1_op:
switch (MIPSInst_RS(ir)) {
#if defined(__mips64)
case dmfc_op:
/* copregister fs -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case dmtc_op:
/* copregister fs <- rt */
DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
#endif
case mfc_op:
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case mtc_op:
/* copregister rd <- rt */
SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
case cfc_op:{
/* cop control register rd -> gpr[rt] */
u32 value;
if (MIPSInst_RD(ir) == FPCREG_CSR) {
value = ctx->fcr31;
value = (value & ~FPU_CSR_RM) |
mips_rm[modeindex(value)];
#ifdef CSRTRACE
printk("%p gpr[%d]<-csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
}
else if (MIPSInst_RD(ir) == FPCREG_RID)
value = 0;
else
value = 0;
if (MIPSInst_RT(ir))
xcp->regs[MIPSInst_RT(ir)] = value;
break;
}
case ctc_op:{
/* copregister rd <- rt */
u32 value;
if (MIPSInst_RT(ir) == 0)
value = 0;
else
value = xcp->regs[MIPSInst_RT(ir)];
/* we only have one writable control reg
*/
if (MIPSInst_RD(ir) == FPCREG_CSR) {
#ifdef CSRTRACE
printk("%p gpr[%d]->csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
/*
* Don't write reserved bits,
* and convert to ieee library modes
*/
ctx->fcr31 = (value &
~(FPU_CSR_RSVD | FPU_CSR_RM)) |
ieee_rm[modeindex(value)];
}
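/*
 * In the FCSR the cause bits sit five bits above the matching
 * enable bits, so (fcr31 >> 5) & fcr31 & FPU_CSR_ALL_E is nonzero
 * exactly when an enabled exception is pending.
 */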
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
}
break;
}
case bc_op:{
int likely = 0;
if (xcp->cp0_cause & CAUSEF_BD)
return SIGILL;
#if __mips >= 4
cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2];
#else
cond = ctx->fcr31 & FPU_CSR_COND;
#endif
switch (MIPSInst_RT(ir) & 3) {
case bcfl_op:
likely = 1;
case bcf_op:
cond = !cond;
break;
case bctl_op:
likely = 1;
case bct_op:
break;
default:
/* that's an illegal instruction */
return SIGILL;
}
xcp->cp0_cause |= CAUSEF_BD;
if (cond) {
/* branch taken: emulate dslot
* instruction
*/
xcp->cp0_epc += 4;
contpc = (xcp->cp0_epc +
(MIPSInst_SIMM(ir) << 2));
if (!access_ok(VERIFY_READ, xcp->cp0_epc,
sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir,
(mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
switch (MIPSInst_OPCODE(ir)) {
case lwc1_op:
case swc1_op:
#if (__mips >= 2 || defined(__mips64))
case ldc1_op:
case sdc1_op:
#endif
case cop1_op:
#if __mips >= 4 && __mips != 32
case cop1x_op:
#endif
/* it's one of ours */
goto emul;
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) == movc_op)
goto emul;
break;
#endif
}
/*
* Single step the non-cp1
* instruction in the dslot
*/
return mips_dsemul(xcp, ir, contpc);
}
else {
/* branch not taken */
if (likely) {
/*
* branch likely nullifies
* dslot if not taken
*/
xcp->cp0_epc += 4;
contpc += 4;
/*
* else continue & execute
* dslot as normal insn
*/
}
}
break;
}
default:
if (!(MIPSInst_RS(ir) & 0x10))
return SIGILL;
{
int sig;
/* a real fpu computation instruction */
if ((sig = fpu_emu(xcp, ctx, ir)))
return sig;
}
}
break;
#if __mips >= 4 && __mips != 32
case cop1x_op:{
int sig = fpux_emu(xcp, ctx, ir, fault_addr);
if (sig)
return sig;
break;
}
#endif
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) != movc_op)
return SIGILL;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
xcp->regs[MIPSInst_RD(ir)] =
xcp->regs[MIPSInst_RS(ir)];
break;
#endif
default:
return SIGILL;
}
/* we did it !! */
xcp->cp0_epc = contpc;
xcp->cp0_cause &= ~CAUSEF_BD;
return 0;
}
/*
* Conversion table from MIPS compare ops 48-63
* cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
*/
static const unsigned char cmptab[8] = {
0, /* cmp_0 (sig) cmp_sf */
IEEE754_CUN, /* cmp_un (sig) cmp_ngle */
IEEE754_CEQ, /* cmp_eq (sig) cmp_seq */
IEEE754_CEQ | IEEE754_CUN, /* cmp_ueq (sig) cmp_ngl */
IEEE754_CLT, /* cmp_olt (sig) cmp_lt */
IEEE754_CLT | IEEE754_CUN, /* cmp_ult (sig) cmp_nge */
IEEE754_CLT | IEEE754_CEQ, /* cmp_ole (sig) cmp_le */
IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN, /* cmp_ule (sig) cmp_ngt */
};
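/*
 * Each cmptab entry ORs together the IEEE relations (unordered,
 * equal, less) for which the predicate holds. Bit 3 of the compare
 * opcode, tested as cmpop & 0x8, selects the signalling variant,
 * which raises IEEE754_INVALID_OPERATION on unordered operands.
 */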
#if __mips >= 4 && __mips != 32
/*
* Additional MIPS4 instructions
*/
#define DEF3OP(name, p, f1, f2, f3) \
static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \
ieee754##p t) \
{ \
struct _ieee754_csr ieee754_csr_save; \
s = f1(s, t); \
ieee754_csr_save = ieee754_csr; \
s = f2(s, r); \
ieee754_csr_save.cx |= ieee754_csr.cx; \
ieee754_csr_save.sx |= ieee754_csr.sx; \
s = f3(s); \
ieee754_csr.cx |= ieee754_csr_save.cx; \
ieee754_csr.sx |= ieee754_csr_save.sx; \
return s; \
}
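/*
 * DEF3OP glues a fused operation together from a multiply (f1), an
 * add or subtract (f2) and an optional negate (f3), saving and
 * re-merging the csr exception bits so that flags raised by the
 * intermediate results are not lost.
 */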
static ieee754dp fpemu_dp_recip(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), d);
}
static ieee754dp fpemu_dp_rsqrt(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
}
static ieee754sp fpemu_sp_recip(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), s);
}
static ieee754sp fpemu_sp_rsqrt(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
}
DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir, void *__user *fault_addr)
{
unsigned rcsr = 0; /* resulting csr */
MIPS_FPU_EMU_INC_STATS(cp1xops);
switch (MIPSInst_FMA_FFMT(ir)) {
case s_fmt:{ /* 0 */
ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp);
ieee754sp fd, fr, fs, ft;
u32 __user *va;
u32 val;
switch (MIPSInst_FUNC(ir)) {
case lwxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_FD(ir));
break;
case swxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_s_op:
handler = fpemu_sp_madd;
goto scoptop;
case msub_s_op:
handler = fpemu_sp_msub;
goto scoptop;
case nmadd_s_op:
handler = fpemu_sp_nmadd;
goto scoptop;
case nmsub_s_op:
handler = fpemu_sp_nmsub;
goto scoptop;
scoptop:
SPFROMREG(fr, MIPSInst_FR(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
SPTOREG(fd, MIPSInst_FD(ir));
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",
ctx->fcr31); */
return SIGFPE;
}
break;
default:
return SIGILL;
}
break;
}
case d_fmt:{ /* 1 */
ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp);
ieee754dp fd, fr, fs, ft;
u64 __user *va;
u64 val;
switch (MIPSInst_FUNC(ir)) {
case ldxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_FD(ir));
break;
case sdxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_d_op:
handler = fpemu_dp_madd;
goto dcoptop;
case msub_d_op:
handler = fpemu_dp_msub;
goto dcoptop;
case nmadd_d_op:
handler = fpemu_dp_nmadd;
goto dcoptop;
case nmsub_d_op:
handler = fpemu_dp_nmsub;
goto dcoptop;
dcoptop:
DPFROMREG(fr, MIPSInst_FR(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
DPTOREG(fd, MIPSInst_FD(ir));
goto copcsr;
default:
return SIGILL;
}
break;
}
case 0x7: /* 7 */
if (MIPSInst_FUNC(ir) != pfetch_op) {
return SIGILL;
}
/* ignore prefx operation */
break;
default:
return SIGILL;
}
return 0;
}
#endif
/*
* Emulate a single COP1 arithmetic instruction.
*/
static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
{
int rfmt; /* resulting format */
unsigned rcsr = 0; /* resulting csr */
unsigned cond;
union {
ieee754dp d;
ieee754sp s;
int w;
#ifdef __mips64
s64 l;
#endif
} rv; /* resulting value */
MIPS_FPU_EMU_INC_STATS(cp1ops);
switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
case s_fmt:{ /* 0 */
union {
ieee754sp(*b) (ieee754sp, ieee754sp);
ieee754sp(*u) (ieee754sp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754sp_add;
goto scopbop;
case fsub_op:
handler.b = ieee754sp_sub;
goto scopbop;
case fmul_op:
handler.b = ieee754sp_mul;
goto scopbop;
case fdiv_op:
handler.b = ieee754sp_div;
goto scopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754sp_sqrt;
goto scopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_sp_rsqrt;
goto scopuop;
case frecip_op:
handler.u = fpemu_sp_recip;
goto scopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754sp_abs;
goto scopuop;
case fneg_op:
handler.u = ieee754sp_neg;
goto scopuop;
case fmov_op:
/* an easy one */
SPFROMREG(rv.s, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
scopbop:
{
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.s = (*handler.b) (fs, ft);
goto copcsr;
}
scopuop:
{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = (*handler.u) (fs);
goto copcsr;
}
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_ZERO_DIVIDE))
rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
break;
/* unary conv ops */
case fcvts_op:
return SIGILL; /* not defined */
case fcvtd_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fsp(fs);
rfmt = d_fmt;
goto copcsr;
}
case fcvtw_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_tint(fs);
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif /* __mips >= 2 || defined(__mips64) */
#if defined(__mips64)
case fcvtl_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754sp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754sp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8) && ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case d_fmt:{
union {
ieee754dp(*b) (ieee754dp, ieee754dp);
ieee754dp(*u) (ieee754dp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754dp_add;
goto dcopbop;
case fsub_op:
handler.b = ieee754dp_sub;
goto dcopbop;
case fmul_op:
handler.b = ieee754dp_mul;
goto dcopbop;
case fdiv_op:
handler.b = ieee754dp_div;
goto dcopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754dp_sqrt;
goto dcopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_dp_rsqrt;
goto dcopuop;
case frecip_op:
handler.u = fpemu_dp_recip;
goto dcopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754dp_abs;
goto dcopuop;
case fneg_op:
handler.u = ieee754dp_neg;
goto dcopuop;
case fmov_op:
/* an easy one */
DPFROMREG(rv.d, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
dcopbop:{
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.d = (*handler.b) (fs, ft);
goto copcsr;
}
dcopuop:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = (*handler.u) (fs);
goto copcsr;
}
/* unary conv ops */
case fcvts_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fdp(fs);
rfmt = s_fmt;
goto copcsr;
}
case fcvtd_op:
return SIGILL; /* not defined */
case fcvtw_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754dp_tint(fs); /* wrong */
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif
#if defined(__mips64)
case fcvtl_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754dp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8)
&&
ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case w_fmt:{
ieee754sp fs;
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert word to single precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fint(fs.bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert word to double precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fint(fs.bits);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#if defined(__mips64)
case l_fmt:{
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert long to single precision real */
rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert long to double precision real */
rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#endif
default:
return SIGILL;
}
/*
* Update the fpu CSR register for this operation.
* If an exception is required, generate a tidy SIGFPE exception,
* without updating the result register.
* Note: cause exception bits do not accumulate, they are rewritten
* for each op; only the flag/sticky bits accumulate.
*/
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */
return SIGFPE;
}
/*
* Now we can safely write the result back to the register file.
*/
switch (rfmt) {
case -1:{
#if __mips >= 4
cond = fpucondbit[MIPSInst_FD(ir) >> 2];
#else
cond = FPU_CSR_COND;
#endif
if (rv.w)
ctx->fcr31 |= cond;
else
ctx->fcr31 &= ~cond;
break;
}
case d_fmt:
DPTOREG(rv.d, MIPSInst_FD(ir));
break;
case s_fmt:
SPTOREG(rv.s, MIPSInst_FD(ir));
break;
case w_fmt:
SITOREG(rv.w, MIPSInst_FD(ir));
break;
#if defined(__mips64)
case l_fmt:
DITOREG(rv.l, MIPSInst_FD(ir));
break;
#endif
default:
return SIGILL;
}
return 0;
}
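/*
 * Top-level entry point. Without a hardware fpu this keeps fetching
 * and emulating instructions (skipping nops) until the epc stops
 * advancing or a signal is raised, with a cond_resched() between
 * iterations; with an fpu present exactly one faulting instruction
 * is emulated. A SIGILL after the epc has advanced just means the
 * run reached a non-fp instruction and is reported as success.
 */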
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
unsigned long oldepc, prevepc;
mips_instruction insn;
int sig = 0;
oldepc = xcp->cp0_epc;
do {
prevepc = xcp->cp0_epc;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
if (insn == 0)
xcp->cp0_epc += 4; /* skip nops */
else {
/*
* The 'ieee754_csr' is an alias of
* ctx->fcr31. No need to copy ctx->fcr31 to
* ieee754_csr. But ieee754_csr.rm is ieee
* library modes. (not mips rounding mode)
*/
/* convert to ieee library modes */
ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
sig = cop1Emulate(xcp, ctx, fault_addr);
/* revert to mips rounding mode */
ieee754_csr.rm = mips_rm[ieee754_csr.rm];
}
if (has_fpu)
break;
if (sig)
break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
/* SIGILL indicates a non-fpu instruction */
if (sig == SIGILL && xcp->cp0_epc != oldepc)
/* but if epc has advanced, then ignore it */
sig = 0;
return sig;
}
#ifdef CONFIG_DEBUG_FS
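/*
 * debugfs_create_file() below passes the offset of each counter
 * within struct mips_fpu_emulator_stats through the data pointer;
 * the getter adds that offset to every cpu's per-cpu instance and
 * sums the local_t counters into a single value.
 */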
static int fpuemu_stat_get(void *data, u64 *val)
{
int cpu;
unsigned long sum = 0;
for_each_online_cpu(cpu) {
struct mips_fpu_emulator_stats *ps;
local_t *pv;
ps = &per_cpu(fpuemustats, cpu);
pv = (void *)ps + (unsigned long)data;
sum += local_read(pv);
}
*val = sum;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_fpuemu(void)
{
struct dentry *d, *dir;
if (!mips_debugfs_dir)
return -ENODEV;
dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
if (!dir)
return -ENOMEM;
#define FPU_STAT_CREATE(M) \
do { \
d = debugfs_create_file(#M , S_IRUGO, dir, \
(void *)offsetof(struct mips_fpu_emulator_stats, M), \
&fops_fpuemu_stat); \
if (!d) \
return -ENOMEM; \
} while (0)
FPU_STAT_CREATE(emulated);
FPU_STAT_CREATE(loads);
FPU_STAT_CREATE(stores);
FPU_STAT_CREATE(cp1ops);
FPU_STAT_CREATE(cp1xops);
FPU_STAT_CREATE(errors);
return 0;
}
__initcall(debugfs_fpuemu);
#endif
| /*
* cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator
*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* A complete emulator for MIPS coprocessor 1 instructions. This is
* required for #float(switch) or #float(trap), where it catches all
* COP1 instructions via the "CoProcessor Unusable" exception.
*
* More surprisingly, it is also required for #float(ieee), to help out
* the hardware fpu at the boundaries of the IEEE-754 representation
* (denormalised values, infinities, underflow, etc). It is made
* quite nasty because emulation of some non-COP1 instructions is
* required, e.g. in branch delay slots.
*
* Note that if you know you won't have an fpu, then you'll get much
* better performance by compiling with -msoft-float!
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/inst.h>
#include <asm/bootinfo.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/mipsregs.h>
#include <asm/fpu_emulator.h>
#include <asm/uaccess.h>
#include <asm/branch.h>
#include "ieee754.h"
/* Strap kernel emulator for full MIPS IV emulation */
#ifdef __mips
#undef __mips
#endif
#define __mips 4
/* Function which emulates a floating point instruction. */
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
mips_instruction);
#if __mips >= 4 && __mips != 32
static int fpux_emu(struct pt_regs *,
struct mips_fpu_struct *, mips_instruction, void *__user *);
#endif
/* Further private data for which no space exists in mips_fpu_struct */
#ifdef CONFIG_DEBUG_FS
DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#endif
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
/* Determine rounding mode from the RM bits of the FCSR */
#define modeindex(v) ((v) & FPU_CSR_RM)
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
[FPU_CSR_RZ] = IEEE754_RZ,
[FPU_CSR_RU] = IEEE754_RU,
[FPU_CSR_RD] = IEEE754_RD,
};
/* Convert IEEE library modes to Mips rounding mode (0..3). */
static const unsigned char mips_rm[4] = {
[IEEE754_RN] = FPU_CSR_RN,
[IEEE754_RZ] = FPU_CSR_RZ,
[IEEE754_RD] = FPU_CSR_RD,
[IEEE754_RU] = FPU_CSR_RU,
};
#if __mips >= 4
/* convert condition code register number to csr bit */
static const unsigned int fpucondbit[8] = {
FPU_CSR_COND0,
FPU_CSR_COND1,
FPU_CSR_COND2,
FPU_CSR_COND3,
FPU_CSR_COND4,
FPU_CSR_COND5,
FPU_CSR_COND6,
FPU_CSR_COND7
};
#endif
/*
* Redundant with logic already in kernel/branch.c,
* embedded in compute_return_epc. At some point,
* a single subroutine should be used across both
* modules.
*/
static int isBranchInstr(mips_instruction * i)
{
switch (MIPSInst_OPCODE(*i)) {
case spec_op:
switch (MIPSInst_FUNC(*i)) {
case jalr_op:
case jr_op:
return 1;
}
break;
case bcond_op:
switch (MIPSInst_RT(*i)) {
case bltz_op:
case bgez_op:
case bltzl_op:
case bgezl_op:
case bltzal_op:
case bgezal_op:
case bltzall_op:
case bgezall_op:
return 1;
}
break;
case j_op:
case jal_op:
case jalx_op:
case beq_op:
case bne_op:
case blez_op:
case bgtz_op:
case beql_op:
case bnel_op:
case blezl_op:
case bgtzl_op:
return 1;
case cop0_op:
case cop1_op:
case cop2_op:
case cop1x_op:
if (MIPSInst_RS(*i) == bc_op)
return 1;
break;
}
return 0;
}
/*
* In the Linux kernel, we support selection of FPR format on the
* basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS
* as a proxy for the FR bit so that a 64-bit FPU is emulated. In any
* case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
* even FPRs are used (Status.FR = 0).
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
if (cpu_has_fpu)
return xcp->cp0_status & ST0_FR;
#ifdef CONFIG_64BIT
return !test_thread_flag(TIF_32BIT_REGS);
#else
return 0;
#endif
}
#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
(int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
cop1_64bit(xcp) || !(x & 1) ? \
ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x) SITOREG((sp).bits, x)
#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x)
#define DPTOREG(dp, x) DITOREG((dp).bits, x)
/*
* Emulate the single floating point instruction pointed at by EPC.
* Two instructions are emulated if the faulting instruction is in a
* branch delay slot.
*/
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
void *__user *fault_addr)
{
mips_instruction ir;
unsigned long emulpc, contpc;
unsigned int cond;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
/* XXX NEC Vr54xx bug workaround */
if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
xcp->cp0_cause &= ~CAUSEF_BD;
if (xcp->cp0_cause & CAUSEF_BD) {
/*
* The instruction to be emulated is in a branch delay slot
* which means that we have to emulate the branch instruction
* BEFORE we do the cop1 instruction.
*
* This branch could be a COP1 branch, but in that case we
* would have had a trap for that instruction, and would not
* come through this route.
*
* Linux MIPS branch emulator operates on context, updating the
* cp0_epc.
*/
emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */
if (__compute_return_epc(xcp)) {
#ifdef CP1DBG
printk("failed to emulate branch at %p\n",
(void *) (xcp->cp0_epc));
#endif
return SIGILL;
}
if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) emulpc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGSEGV;
}
/* __compute_return_epc() will have updated cp0_epc */
contpc = xcp->cp0_epc;
/* In order not to confuse ptrace() et al, tweak context */
xcp->cp0_epc = emulpc - 4;
} else {
emulpc = xcp->cp0_epc;
contpc = xcp->cp0_epc + 4;
}
emul:
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_RT(ir));
break;
}
case sdc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case lwc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_RT(ir));
break;
}
case swc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case cop1_op:
switch (MIPSInst_RS(ir)) {
#if defined(__mips64)
case dmfc_op:
/* copregister fs -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case dmtc_op:
/* copregister fs <- rt */
DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
#endif
case mfc_op:
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case mtc_op:
/* copregister rd <- rt */
SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
case cfc_op:{
/* cop control register rd -> gpr[rt] */
u32 value;
if (MIPSInst_RD(ir) == FPCREG_CSR) {
value = ctx->fcr31;
value = (value & ~FPU_CSR_RM) |
mips_rm[modeindex(value)];
#ifdef CSRTRACE
printk("%p gpr[%d]<-csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
}
else if (MIPSInst_RD(ir) == FPCREG_RID)
value = 0;
else
value = 0;
if (MIPSInst_RT(ir))
xcp->regs[MIPSInst_RT(ir)] = value;
break;
}
case ctc_op:{
/* copregister rd <- rt */
u32 value;
if (MIPSInst_RT(ir) == 0)
value = 0;
else
value = xcp->regs[MIPSInst_RT(ir)];
/* we only have one writable control reg
*/
if (MIPSInst_RD(ir) == FPCREG_CSR) {
#ifdef CSRTRACE
printk("%p gpr[%d]->csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
/*
* Don't write reserved bits,
* and convert to ieee library modes
*/
ctx->fcr31 = (value &
~(FPU_CSR_RSVD | FPU_CSR_RM)) |
ieee_rm[modeindex(value)];
}
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
}
break;
}
case bc_op:{
int likely = 0;
if (xcp->cp0_cause & CAUSEF_BD)
return SIGILL;
#if __mips >= 4
cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2];
#else
cond = ctx->fcr31 & FPU_CSR_COND;
#endif
switch (MIPSInst_RT(ir) & 3) {
case bcfl_op:
likely = 1;
case bcf_op:
cond = !cond;
break;
case bctl_op:
likely = 1;
case bct_op:
break;
default:
/* that's an illegal instruction */
return SIGILL;
}
xcp->cp0_cause |= CAUSEF_BD;
if (cond) {
/* branch taken: emulate dslot
* instruction
*/
xcp->cp0_epc += 4;
contpc = (xcp->cp0_epc +
(MIPSInst_SIMM(ir) << 2));
if (!access_ok(VERIFY_READ, xcp->cp0_epc,
sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir,
(mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
switch (MIPSInst_OPCODE(ir)) {
case lwc1_op:
case swc1_op:
#if (__mips >= 2 || defined(__mips64))
case ldc1_op:
case sdc1_op:
#endif
case cop1_op:
#if __mips >= 4 && __mips != 32
case cop1x_op:
#endif
/* it's one of ours */
goto emul;
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) == movc_op)
goto emul;
break;
#endif
}
/*
* Single step the non-cp1
* instruction in the dslot
*/
return mips_dsemul(xcp, ir, contpc);
}
else {
/* branch not taken */
if (likely) {
/*
* branch likely nullifies
* dslot if not taken
*/
xcp->cp0_epc += 4;
contpc += 4;
/*
* else continue & execute
* dslot as normal insn
*/
}
}
break;
}
default:
if (!(MIPSInst_RS(ir) & 0x10))
return SIGILL;
{
int sig;
/* a real fpu computation instruction */
if ((sig = fpu_emu(xcp, ctx, ir)))
return sig;
}
}
break;
#if __mips >= 4 && __mips != 32
case cop1x_op:{
int sig = fpux_emu(xcp, ctx, ir, fault_addr);
if (sig)
return sig;
break;
}
#endif
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) != movc_op)
return SIGILL;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
xcp->regs[MIPSInst_RD(ir)] =
xcp->regs[MIPSInst_RS(ir)];
break;
#endif
default:
return SIGILL;
}
/* we did it !! */
xcp->cp0_epc = contpc;
xcp->cp0_cause &= ~CAUSEF_BD;
return 0;
}
/*
* Conversion table from MIPS compare ops 48-63
* cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
*/
static const unsigned char cmptab[8] = {
0, /* cmp_0 (sig) cmp_sf */
IEEE754_CUN, /* cmp_un (sig) cmp_ngle */
IEEE754_CEQ, /* cmp_eq (sig) cmp_seq */
IEEE754_CEQ | IEEE754_CUN, /* cmp_ueq (sig) cmp_ngl */
IEEE754_CLT, /* cmp_olt (sig) cmp_lt */
IEEE754_CLT | IEEE754_CUN, /* cmp_ult (sig) cmp_nge */
IEEE754_CLT | IEEE754_CEQ, /* cmp_ole (sig) cmp_le */
IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN, /* cmp_ule (sig) cmp_ngt */
};
#if __mips >= 4 && __mips != 32
/*
* Additional MIPS4 instructions
*/
#define DEF3OP(name, p, f1, f2, f3) \
static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \
ieee754##p t) \
{ \
struct _ieee754_csr ieee754_csr_save; \
s = f1(s, t); \
ieee754_csr_save = ieee754_csr; \
s = f2(s, r); \
ieee754_csr_save.cx |= ieee754_csr.cx; \
ieee754_csr_save.sx |= ieee754_csr.sx; \
s = f3(s); \
ieee754_csr.cx |= ieee754_csr_save.cx; \
ieee754_csr.sx |= ieee754_csr_save.sx; \
return s; \
}
static ieee754dp fpemu_dp_recip(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), d);
}
static ieee754dp fpemu_dp_rsqrt(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
}
static ieee754sp fpemu_sp_recip(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), s);
}
static ieee754sp fpemu_sp_rsqrt(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
}
DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
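/*
 * The handlers generated above compute (fs * ft) +/- fr; the
 * nmadd/nmsub variants additionally negate the final result.
 * Exception flags raised by either step are accumulated through
 * the saved csr.
 */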
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir, void *__user *fault_addr)
{
unsigned rcsr = 0; /* resulting csr */
MIPS_FPU_EMU_INC_STATS(cp1xops);
switch (MIPSInst_FMA_FFMT(ir)) {
case s_fmt:{ /* 0 */
ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp);
ieee754sp fd, fr, fs, ft;
u32 __user *va;
u32 val;
switch (MIPSInst_FUNC(ir)) {
case lwxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_FD(ir));
break;
case swxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_s_op:
handler = fpemu_sp_madd;
goto scoptop;
case msub_s_op:
handler = fpemu_sp_msub;
goto scoptop;
case nmadd_s_op:
handler = fpemu_sp_nmadd;
goto scoptop;
case nmsub_s_op:
handler = fpemu_sp_nmsub;
goto scoptop;
scoptop:
SPFROMREG(fr, MIPSInst_FR(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
SPTOREG(fd, MIPSInst_FD(ir));
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",
ctx->fcr31); */
return SIGFPE;
}
break;
default:
return SIGILL;
}
break;
}
case d_fmt:{ /* 1 */
ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp);
ieee754dp fd, fr, fs, ft;
u64 __user *va;
u64 val;
switch (MIPSInst_FUNC(ir)) {
case ldxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_FD(ir));
break;
case sdxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_d_op:
handler = fpemu_dp_madd;
goto dcoptop;
case msub_d_op:
handler = fpemu_dp_msub;
goto dcoptop;
case nmadd_d_op:
handler = fpemu_dp_nmadd;
goto dcoptop;
case nmsub_d_op:
handler = fpemu_dp_nmsub;
goto dcoptop;
dcoptop:
DPFROMREG(fr, MIPSInst_FR(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
DPTOREG(fd, MIPSInst_FD(ir));
goto copcsr;
default:
return SIGILL;
}
break;
}
case 0x7: /* 7 */
if (MIPSInst_FUNC(ir) != pfetch_op) {
return SIGILL;
}
/* ignore prefx operation */
break;
default:
return SIGILL;
}
return 0;
}
#endif
/*
* Emulate a single COP1 arithmetic instruction.
*/
static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
{
int rfmt; /* resulting format */
unsigned rcsr = 0; /* resulting csr */
unsigned cond;
union {
ieee754dp d;
ieee754sp s;
int w;
#ifdef __mips64
s64 l;
#endif
} rv; /* resulting value */
MIPS_FPU_EMU_INC_STATS(cp1ops);
switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
case s_fmt:{ /* 0 */
union {
ieee754sp(*b) (ieee754sp, ieee754sp);
ieee754sp(*u) (ieee754sp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754sp_add;
goto scopbop;
case fsub_op:
handler.b = ieee754sp_sub;
goto scopbop;
case fmul_op:
handler.b = ieee754sp_mul;
goto scopbop;
case fdiv_op:
handler.b = ieee754sp_div;
goto scopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754sp_sqrt;
goto scopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_sp_rsqrt;
goto scopuop;
case frecip_op:
handler.u = fpemu_sp_recip;
goto scopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754sp_abs;
goto scopuop;
case fneg_op:
handler.u = ieee754sp_neg;
goto scopuop;
case fmov_op:
/* an easy one */
SPFROMREG(rv.s, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
scopbop:
{
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.s = (*handler.b) (fs, ft);
goto copcsr;
}
scopuop:
{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = (*handler.u) (fs);
goto copcsr;
}
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_ZERO_DIVIDE))
rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
break;
/* unary conv ops */
case fcvts_op:
return SIGILL; /* not defined */
case fcvtd_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fsp(fs);
rfmt = d_fmt;
goto copcsr;
}
case fcvtw_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_tint(fs);
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif /* __mips >= 2 */
#if defined(__mips64)
case fcvtl_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754sp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754sp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8) && ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case d_fmt:{
union {
ieee754dp(*b) (ieee754dp, ieee754dp);
ieee754dp(*u) (ieee754dp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754dp_add;
goto dcopbop;
case fsub_op:
handler.b = ieee754dp_sub;
goto dcopbop;
case fmul_op:
handler.b = ieee754dp_mul;
goto dcopbop;
case fdiv_op:
handler.b = ieee754dp_div;
goto dcopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754dp_sqrt;
goto dcopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_dp_rsqrt;
goto dcopuop;
case frecip_op:
handler.u = fpemu_dp_recip;
goto dcopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754dp_abs;
goto dcopuop;
case fneg_op:
handler.u = ieee754dp_neg;
goto dcopuop;
case fmov_op:
/* an easy one */
DPFROMREG(rv.d, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
dcopbop:{
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.d = (*handler.b) (fs, ft);
goto copcsr;
}
dcopuop:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = (*handler.u) (fs);
goto copcsr;
}
/* unary conv ops */
case fcvts_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fdp(fs);
rfmt = s_fmt;
goto copcsr;
}
case fcvtd_op:
return SIGILL; /* not defined */
case fcvtw_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754dp_tint(fs); /* wrong */
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif
#if defined(__mips64)
case fcvtl_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754dp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8)
&&
ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case w_fmt:{
ieee754sp fs;
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert word to single precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fint(fs.bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert word to double precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fint(fs.bits);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#if defined(__mips64)
case l_fmt:{
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert long to single precision real */
rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert long to double precision real */
rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#endif
default:
return SIGILL;
}
/*
* Update the fpu CSR register for this operation.
* If an exception is required, generate a tidy SIGFPE exception,
* without updating the result register.
* Note: cause exception bits do not accumulate, they are rewritten
* for each op; only the flag/sticky bits accumulate.
*/
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */
return SIGFPE;
}
/*
* Now we can safely write the result back to the register file.
*/
switch (rfmt) {
case -1:{
#if __mips >= 4
cond = fpucondbit[MIPSInst_FD(ir) >> 2];
#else
cond = FPU_CSR_COND;
#endif
if (rv.w)
ctx->fcr31 |= cond;
else
ctx->fcr31 &= ~cond;
break;
}
case d_fmt:
DPTOREG(rv.d, MIPSInst_FD(ir));
break;
case s_fmt:
SPTOREG(rv.s, MIPSInst_FD(ir));
break;
case w_fmt:
SITOREG(rv.w, MIPSInst_FD(ir));
break;
#if defined(__mips64)
case l_fmt:
DITOREG(rv.l, MIPSInst_FD(ir));
break;
#endif
default:
return SIGILL;
}
return 0;
}
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
unsigned long oldepc, prevepc;
mips_instruction insn;
int sig = 0;
oldepc = xcp->cp0_epc;
do {
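/*
 * Keep emulating while the epc keeps advancing, i.e. while the
 * next instruction is also handled here; stop once a signal is
 * pending or a hardware FPU can take over.
 */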
prevepc = xcp->cp0_epc;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
if (insn == 0)
xcp->cp0_epc += 4; /* skip nops */
else {
/*
* The 'ieee754_csr' is an alias of
* ctx->fcr31. No need to copy ctx->fcr31 to
* ieee754_csr. But ieee754_csr.rm is ieee
* library modes. (not mips rounding mode)
*/
/* convert to ieee library modes */
ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
sig = cop1Emulate(xcp, ctx, fault_addr);
/* revert to mips rounding mode */
ieee754_csr.rm = mips_rm[ieee754_csr.rm];
}
if (has_fpu)
break;
if (sig)
break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
/* SIGILL indicates a non-fpu instruction */
if (sig == SIGILL && xcp->cp0_epc != oldepc)
/* but if epc has advanced, then ignore it */
sig = 0;
return sig;
}
#ifdef CONFIG_DEBUG_FS
static int fpuemu_stat_get(void *data, u64 *val)
{
int cpu;
unsigned long sum = 0;
for_each_online_cpu(cpu) {
struct mips_fpu_emulator_stats *ps;
local_t *pv;
ps = &per_cpu(fpuemustats, cpu);
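/*
 * 'data' carries the offsetof() of the requested counter within
 * struct mips_fpu_emulator_stats (see FPU_STAT_CREATE below),
 * so pv points at that cpu's counter.
 */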
pv = (void *)ps + (unsigned long)data;
sum += local_read(pv);
}
*val = sum;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_fpuemu(void)
{
struct dentry *d, *dir;
if (!mips_debugfs_dir)
return -ENODEV;
dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
if (!dir)
return -ENOMEM;
#define FPU_STAT_CREATE(M) \
do { \
d = debugfs_create_file(#M , S_IRUGO, dir, \
(void *)offsetof(struct mips_fpu_emulator_stats, M), \
&fops_fpuemu_stat); \
if (!d) \
return -ENOMEM; \
} while (0)
FPU_STAT_CREATE(emulated);
FPU_STAT_CREATE(loads);
FPU_STAT_CREATE(stores);
FPU_STAT_CREATE(cp1ops);
FPU_STAT_CREATE(cp1xops);
FPU_STAT_CREATE(errors);
return 0;
}
__initcall(debugfs_fpuemu);
#endif
|
110,014,159,054,670 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
siginfo_t info;
int fault;
#if 0
printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
(regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
return;
#endif
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto VMALLOC_FAULT_TARGET;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (kernel_uses_smartmips_rixi) {
if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
1, 0, regs, address);
tsk->maj_flt++;
} else {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
1, 0, regs, address);
tsk->min_flt++;
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
#if 0
printk("do_page_fault() #2: sending SIGSEGV to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
out_of_memory:
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
up_read(&mm->mmap_sem);
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
else
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
#endif
}
| /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
siginfo_t info;
int fault;
#if 0
printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
(regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
return;
#endif
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto VMALLOC_FAULT_TARGET;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (kernel_uses_smartmips_rixi) {
if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
tsk->maj_flt++;
} else {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
tsk->min_flt++;
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
#if 0
printk("do_page_fault() #2: sending SIGSEGV to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
out_of_memory:
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
up_read(&mm->mmap_sem);
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
else
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
#endif
}
|
150,539,401,702,001 | /*
* Copyright 2007 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_POWERPC_EMULATED_OPS_H
#define _ASM_POWERPC_EMULATED_OPS_H
#include <asm/atomic.h>
#include <linux/perf_event.h>
#ifdef CONFIG_PPC_EMULATED_STATS
struct ppc_emulated_entry {
const char *name;
atomic_t val;
};
extern struct ppc_emulated {
#ifdef CONFIG_ALTIVEC
struct ppc_emulated_entry altivec;
#endif
struct ppc_emulated_entry dcba;
struct ppc_emulated_entry dcbz;
struct ppc_emulated_entry fp_pair;
struct ppc_emulated_entry isel;
struct ppc_emulated_entry mcrxr;
struct ppc_emulated_entry mfpvr;
struct ppc_emulated_entry multiple;
struct ppc_emulated_entry popcntb;
struct ppc_emulated_entry spe;
struct ppc_emulated_entry string;
struct ppc_emulated_entry unaligned;
#ifdef CONFIG_MATH_EMULATION
struct ppc_emulated_entry math;
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
struct ppc_emulated_entry 8xx;
#endif
#ifdef CONFIG_VSX
struct ppc_emulated_entry vsx;
#endif
#ifdef CONFIG_PPC64
struct ppc_emulated_entry mfdscr;
struct ppc_emulated_entry mtdscr;
#endif
} ppc_emulated;
extern u32 ppc_warn_emulated;
extern void ppc_warn_emulated_print(const char *type);
#define __PPC_WARN_EMULATED(type) \
do { \
atomic_inc(&ppc_emulated.type.val); \
if (ppc_warn_emulated) \
ppc_warn_emulated_print(ppc_emulated.type.name); \
} while (0)
#else /* !CONFIG_PPC_EMULATED_STATS */
#define __PPC_WARN_EMULATED(type) do { } while (0)
#endif /* !CONFIG_PPC_EMULATED_STATS */
#define PPC_WARN_EMULATED(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
1, 0, regs, 0); \
__PPC_WARN_EMULATED(type); \
} while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
1, 0, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \
} while (0)
#endif /* _ASM_POWERPC_EMULATED_OPS_H */
| /*
* Copyright 2007 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_POWERPC_EMULATED_OPS_H
#define _ASM_POWERPC_EMULATED_OPS_H
#include <asm/atomic.h>
#include <linux/perf_event.h>
#ifdef CONFIG_PPC_EMULATED_STATS
struct ppc_emulated_entry {
const char *name;
atomic_t val;
};
extern struct ppc_emulated {
#ifdef CONFIG_ALTIVEC
struct ppc_emulated_entry altivec;
#endif
struct ppc_emulated_entry dcba;
struct ppc_emulated_entry dcbz;
struct ppc_emulated_entry fp_pair;
struct ppc_emulated_entry isel;
struct ppc_emulated_entry mcrxr;
struct ppc_emulated_entry mfpvr;
struct ppc_emulated_entry multiple;
struct ppc_emulated_entry popcntb;
struct ppc_emulated_entry spe;
struct ppc_emulated_entry string;
struct ppc_emulated_entry unaligned;
#ifdef CONFIG_MATH_EMULATION
struct ppc_emulated_entry math;
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
struct ppc_emulated_entry 8xx;
#endif
#ifdef CONFIG_VSX
struct ppc_emulated_entry vsx;
#endif
#ifdef CONFIG_PPC64
struct ppc_emulated_entry mfdscr;
struct ppc_emulated_entry mtdscr;
#endif
} ppc_emulated;
extern u32 ppc_warn_emulated;
extern void ppc_warn_emulated_print(const char *type);
#define __PPC_WARN_EMULATED(type) \
do { \
atomic_inc(&ppc_emulated.type.val); \
if (ppc_warn_emulated) \
ppc_warn_emulated_print(ppc_emulated.type.name); \
} while (0)
#else /* !CONFIG_PPC_EMULATED_STATS */
#define __PPC_WARN_EMULATED(type) do { } while (0)
#endif /* !CONFIG_PPC_EMULATED_STATS */
#define PPC_WARN_EMULATED(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
1, regs, 0); \
__PPC_WARN_EMULATED(type); \
} while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \
do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
1, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \
} while (0)
#endif /* _ASM_POWERPC_EMULATED_OPS_H */
|
246,203,919,000,790 | /*
* Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned int group_flag;
int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
*/
#ifdef CONFIG_PPC32
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_PPC32 */
/*
* Things that are specific to 64-bit implementations.
*/
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
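/*
 * When instruction sampling is enabled, SIAR points at the start
 * of the sampled dispatch group while MMCRA records the slot of
 * the sampled instruction, so advance the ip by 4 bytes per
 * additional slot.
 */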
unsigned long mmcra = regs->dsisr;
if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
return 0;
}
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
* bit in MMCRA.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
*addrp = mfspr(SPRN_SDAR);
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
unsigned long sihv = MMCRA_SIHV;
unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
sihv = POWER6_MMCRA_SIHV;
sipr = POWER6_MMCRA_SIPR;
}
/* PR has priority over HV, so order below is important */
if (mmcra & sipr)
return PERF_RECORD_MISC_USER;
if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
regs->dsisr = mfspr(SPRN_MMCRA);
}
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return !regs->softe;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
void perf_event_print_debug(void)
{
}
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 1:
val = mfspr(SPRN_PMC1);
break;
case 2:
val = mfspr(SPRN_PMC2);
break;
case 3:
val = mfspr(SPRN_PMC3);
break;
case 4:
val = mfspr(SPRN_PMC4);
break;
case 5:
val = mfspr(SPRN_PMC5);
break;
case 6:
val = mfspr(SPRN_PMC6);
break;
#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 1:
mtspr(SPRN_PMC1, val);
break;
case 2:
mtspr(SPRN_PMC2, val);
break;
case 3:
mtspr(SPRN_PMC3, val);
break;
case 4:
mtspr(SPRN_PMC4, val);
break;
case 5:
mtspr(SPRN_PMC5, val);
break;
case 6:
mtspr(SPRN_PMC6, val);
break;
#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
}
/*
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event_id[].
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event_id[i])) {
ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
value = mask = 0;
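/*
 * Merge the constraint values: ordinary fields are simply OR'ed
 * together, while fields listed in add_fields accumulate (the
 * second term supplies the carries); adding test_adder exposes
 * any field that has overflowed its allowed range.
 */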
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
if ((((nv + tadd) ^ value) & mask) != 0 ||
(((nv + tadd) ^ cpuhw->avalues[i][0]) &
cpuhw->amasks[i][0]) != 0)
break;
value = nv;
mask |= cpuhw->amasks[i][0];
}
if (i == n_ev)
return 0; /* all OK */
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
&cpuhw->avalues[i][j]);
}
/* enumerate all possibilities and see if any will work */
i = 0;
j = -1;
value = mask = nv = 0;
while (i < n_ev) {
if (j >= 0) {
/* we're backtracking, restore context */
value = svalues[i];
mask = smasks[i];
j = choice[i];
}
/*
* See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
nv = (value | cpuhw->avalues[i][j]) +
(value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
(((nv + tadd) ^ cpuhw->avalues[i][j])
& cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
* to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
* Found a feasible alternative for event_id i,
* remember where we got up to with this event_id,
* go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
svalues[i] = value;
smasks[i] = mask;
value = nv;
mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
}
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
/*
* Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
* added events.
*/
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; ++i) {
if (cflags[i] & PPMU_LIMITED_PMC_OK) {
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
event = ctrs[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
if (eu || ek || eh)
for (i = 0; i < n; ++i)
if (cflags[i] & PPMU_LIMITED_PMC_OK)
cflags[i] |= PPMU_LIMITED_PMC_REQD;
return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
u64 delta = (val - prev) & 0xfffffffful;
/*
* POWER7 can roll back counter values, if the new value is smaller
* than the previous value it will cause the delta and the counter to
* have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
* number of events to roll back at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
* counters.
*/
if (prev > val && (prev - val) < 256)
delta = 0;
return delta;
}
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
* us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
return (ppmu->flags & PPMU_LIMITED_PMC5_6)
&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = check_and_compute_delta(prev, val);
if (delta)
local64_add(delta, &event->count);
}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
if (check_and_compute_delta(prev, val))
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/*
* Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
* other events. We try to keep the values from the limited
* events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
* the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
if (!cpuhw->n_limited) {
mtspr(SPRN_MMCR0, mmcr0);
return;
}
/*
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
* events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
: "=&r" (pmc5), "=&r" (pmc6)
: "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
"i" (SPRN_MMCR0),
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
freeze_limited_counters(cpuhw, pmc5, pmc6);
else
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
* Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
mtspr(SPRN_MMCR0, mmcr0);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mtspr(SPRN_MMCRA,
cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mb();
}
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
mb();
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
}
cpuhw->disabled = 0;
/*
* If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
* Compute MMCR* values for the new set of events
*/
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
/*
* Add in MMCR0 freeze bits corresponding to the
* attr.exclude_* bits for the first event.
* We have already checked that all events have the
* same values for these bits as the first event.
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
if (event->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_events_kernel;
if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware events to their initial values.
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
/*
* Read off any pre-existing events that need to move
* to another PMC.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
}
/*
* Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
if (event->hw.sample_period) {
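/*
 * The PMC interrupts when bit 31 of the counter becomes set,
 * so preload it so that it overflows after 'left' more events.
 */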
left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
if (event->hw.state & PERF_HES_STOPPED)
val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
mb();
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
/*
* Enable instruction sampling if necessary
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mb();
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
flags[n] = event->hw.event_base;
events[n++] = event->hw.config;
}
}
return n;
}
/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
cpuhw = &__get_cpu_var(cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time(->commit_txn) as a whole
*/
if (cpuhw->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
event->hw.config = cpuhw->events[n0];
nocheck:
++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
* Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
cpuhw = &__get_cpu_var(cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
cpuhw->events[i-1] = cpuhw->events[i];
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* POWER-PMU does not support disabling individual counters, hence
* program their cycle counter to their max value and ignore the interrupts.
*/
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 on success
*/
int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
if (!ppmu)
return -EAGAIN;
cpuhw = &__get_cpu_var(cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
if (i < 0)
return -EAGAIN;
for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
* An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
if (event->attr.exclude_user
|| event->attr.exclude_kernel
|| event->attr.exclude_hv
|| event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
return 0;
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
return n > 0;
}
/*
* Find an alternative event_id that goes on a normal PMC, if possible,
* and return the event_id code, or 0 if there is no such alternative.
* (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
n = ppmu->get_alternatives(ev, flags, alt);
if (!n)
return 0;
return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
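/*
 * Illustration (not part of the original driver; the constants are the
 * generic perf ABI ones from <linux/perf_event.h>): a cache event config
 * packs type, op and result as config = type | (op << 8) | (result << 16),
 * so an L1-dcache read miss would look roughly like:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D
 *		| (PERF_COUNT_HW_CACHE_OP_READ << 8)
 *		| (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * hw_perf_cache_event() above unpacks those three bytes and indexes
 * ppmu->cache_events[type][op][result] to get the raw event code, with
 * 0 meaning "not supported" and -1 meaning "invalid combination".
 */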
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
if (!ppmu)
return -ENOENT;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config_base = ev;
event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
* exclude_hv bit to 0 so that we don't care what
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
event->attr.exclude_hv = 0;
/*
* If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
* If this machine has limited events, check whether this
* event_id could go on a limited event.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
* The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return -EINVAL;
}
}
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
return err;
}
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
.add = power_pmu_add,
.del = power_pmu_del,
.start = power_pmu_start,
.stop = power_pmu_stop,
.read = power_pmu_read,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL);
data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, nmi, &data, regs))
power_pmu_stop(event, 0);
}
}
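/*
 * Worked example of the reload arithmetic above (numbers are made up):
 * with a sample_period of 100000, once the period expires "left" is reset
 * to 100000 and the PMC is written with 0x80000000 - 100000.  After 100000
 * more counted events the PMC reaches 0x80000000, i.e. it goes negative
 * when read as a 32-bit signed value, which is how perf_event_interrupt()
 * spots the next overflow.  If "left" is 0x80000000 or more, val stays 0
 * and the counter simply counts up from zero.
 */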
/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
if (flags)
return flags;
return user_mode(regs) ? PERF_RECORD_MISC_USER :
PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
* for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
static bool pmc_overflow(unsigned long val)
{
if ((int)val < 0)
return true;
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
* raise a performance monitor exception. We need to catch this to
* ensure we reset the PMC. In all cases the PMC will be 256 or less
* cycles from overflow.
*
* We only do this if the first pass fails to find any overflowing
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
return true;
return false;
}
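/*
 * For instance (hypothetical values): on POWER7, val = 0x7fffff80 gives
 * 0x80000000 - val = 128 <= 256, so it is treated as a counter that was
 * about to overflow but got rolled back by a speculative event, whereas
 * val = 0x7ffff000 (4096 away from overflow) is left alone.
 */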
/*
* Performance monitor interrupt stuff
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if (pmc_overflow(val))
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
}
static void power_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
power_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
int register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(power_pmu_notifier);
return 0;
}
| /*
* Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
unsigned int group_flag;
int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
/*
* Normally, to ignore kernel events we set the FCS (freeze counters
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
*/
#ifdef CONFIG_PPC32
#define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE
#define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_PPC32 */
/*
* Things that are specific to 64-bit implementations.
*/
#ifdef CONFIG_PPC64
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
return 0;
}
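/*
 * Informal note (an assumption about the MMCRA sampling fields, not stated
 * elsewhere in this file): SIAR appears to point at the first instruction
 * of the sampled dispatch group, and MMCRA[SLOT] gives the 1-based position
 * of the sampled instruction within that group, so the code above adds
 * 4 * (slot - 1) bytes (powerpc instructions are 4 bytes).  For example,
 * slot == 3 yields an adjustment of 8 bytes.
 */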
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
* (sampled data address). If we are doing instruction sampling, then
* only give them the SDAR if it corresponds to the instruction
* pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
* bit in MMCRA.
*/
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
unsigned long mmcra = regs->dsisr;
unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
*addrp = mfspr(SPRN_SDAR);
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
unsigned long sihv = MMCRA_SIHV;
unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
sihv = POWER6_MMCRA_SIHV;
sipr = POWER6_MMCRA_SIPR;
}
/* PR has priority over HV, so order below is important */
if (mmcra & sipr)
return PERF_RECORD_MISC_USER;
if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
regs->dsisr = mfspr(SPRN_MMCRA);
}
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return !regs->softe;
}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
void perf_event_print_debug(void)
{
}
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 1:
val = mfspr(SPRN_PMC1);
break;
case 2:
val = mfspr(SPRN_PMC2);
break;
case 3:
val = mfspr(SPRN_PMC3);
break;
case 4:
val = mfspr(SPRN_PMC4);
break;
case 5:
val = mfspr(SPRN_PMC5);
break;
case 6:
val = mfspr(SPRN_PMC6);
break;
#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 1:
mtspr(SPRN_PMC1, val);
break;
case 2:
mtspr(SPRN_PMC2, val);
break;
case 3:
mtspr(SPRN_PMC3, val);
break;
case 4:
mtspr(SPRN_PMC4, val);
break;
case 5:
mtspr(SPRN_PMC5, val);
break;
case 6:
mtspr(SPRN_PMC6, val);
break;
#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
}
/*
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event_id[].
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event_id[i])) {
ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
value = mask = 0;
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
if ((((nv + tadd) ^ value) & mask) != 0 ||
(((nv + tadd) ^ cpuhw->avalues[i][0]) &
cpuhw->amasks[i][0]) != 0)
break;
value = nv;
mask |= cpuhw->amasks[i][0];
}
if (i == n_ev)
return 0; /* all OK */
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
&cpuhw->avalues[i][j]);
}
/* enumerate all possibilities and see if any will work */
i = 0;
j = -1;
value = mask = nv = 0;
while (i < n_ev) {
if (j >= 0) {
/* we're backtracking, restore context */
value = svalues[i];
mask = smasks[i];
j = choice[i];
}
/*
* See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
nv = (value | cpuhw->avalues[i][j]) +
(value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
(((nv + tadd) ^ cpuhw->avalues[i][j])
& cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
* to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
* Found a feasible alternative for event_id i,
* remember where we got up to with this event_id,
* go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
svalues[i] = value;
smasks[i] = mask;
value = nv;
mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
}
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
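/*
 * Rough sketch of the accumulation step used above (illustrative only):
 * nv = (value | a) + (value & a & addf) relies on the identity
 * x + y = (x | y) + (x & y).  Masking the carry term with add_fields means
 * that only fields whose low-order bit is set in add_fields accumulate
 * arithmetically (acting as per-field counters), while all other fields are
 * effectively OR-ed together.  For a hypothetical 2-bit counter field:
 * value = 0b01 and a = 0b01 give nv = 0b10 when the field's bit is in
 * add_fields, but nv = 0b01 (a plain OR) when it is not.  The subsequent
 * (nv + test_adder) ^ ... tests then flag combinations whose accumulated
 * fields are inconsistent with an event's constraints, e.g. a counted
 * resource over its limit or two events selecting conflicting values
 * under the same mask.
 */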
/*
* Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
* added events.
*/
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; ++i) {
if (cflags[i] & PPMU_LIMITED_PMC_OK) {
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
event = ctrs[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
if (eu || ek || eh)
for (i = 0; i < n; ++i)
if (cflags[i] & PPMU_LIMITED_PMC_OK)
cflags[i] |= PPMU_LIMITED_PMC_REQD;
return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
u64 delta = (val - prev) & 0xfffffffful;
/*
* POWER7 can roll back counter values; if the new value is smaller
* than the previous value, it will cause the delta and the counter to
* have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
* number of events to roll back at once. If we detect a rollback,
* return 0. This can lead to a small lack of precision in the
* counters.
*/
if (prev > val && (prev - val) < 256)
delta = 0;
return delta;
}
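/*
 * Two worked cases for the arithmetic above (values are hypothetical):
 * a normal 32-bit wrap, prev = 0xfffffff0 and val = 0x00000010, gives
 * delta = (val - prev) & 0xffffffff = 0x20, i.e. 32 events counted across
 * the wrap.  A POWER7 rollback, prev = 0x1000 and val = 0xfc0 (64 below
 * the previous read), satisfies prev > val && prev - val < 256, so delta
 * is forced to 0 and the rolled-back reading is ignored.
 */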
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
* us whether a given PMC number refers to such a limited PMC.
*/
static int is_limited_pmc(int pmcnum)
{
return (ppmu->flags & PPMU_LIMITED_PMC5_6)
&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = check_and_compute_delta(prev, val);
if (delta)
local64_add(delta, &event->count);
}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
if (check_and_compute_delta(prev, val))
local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
/*
* Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
* other events. We try to keep the values from the limited
* events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
* the limited events as small and consistent as possible.
* Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
if (!cpuhw->n_limited) {
mtspr(SPRN_MMCR0, mmcr0);
return;
}
/*
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
* events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
: "=&r" (pmc5), "=&r" (pmc6)
: "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
"i" (SPRN_MMCR0),
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
freeze_limited_counters(cpuhw, pmc5, pmc6);
else
thaw_limited_counters(cpuhw, pmc5, pmc6);
/*
* Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
mtspr(SPRN_MMCR0, mmcr0);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
/*
* Disable instruction sampling if it was enabled
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mtspr(SPRN_MMCRA,
cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mb();
}
/*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
mb();
}
local_irq_restore(flags);
}
/*
* Re-enable all events.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
}
cpuhw->disabled = 0;
/*
* If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
* Compute MMCR* values for the new set of events
*/
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
/*
* Add in MMCR0 freeze bits corresponding to the
* attr.exclude_* bits for the first event.
* We have already checked that all events have the
* same values for these bits as the first event.
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
if (event->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_events_kernel;
if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
* bit set and set the hardware events to their initial values.
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
/*
* Read off any pre-existing events that need to move
* to another PMC.
*/
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
power_pmu_read(event);
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
}
/*
* Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
cpuhw->limited_counter[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
if (event->hw.sample_period) {
left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
if (event->hw.state & PERF_HES_STOPPED)
val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
mb();
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
/*
* Enable instruction sampling if necessary
*/
if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
mb();
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
flags[n] = event->hw.event_base;
events[n++] = event->hw.config;
}
}
return n;
}
/*
* Add an event to the PMU.
* If all events are not already frozen, then we disable and
* re-enable the PMU in order to get power_pmu_enable() to do the
* actual work of reconfiguring the PMU.
*/
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
cpuhw = &__get_cpu_var(cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
cpuhw->event[n0] = event;
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
if (!(ef_flags & PERF_EF_START))
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuhw->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
event->hw.config = cpuhw->events[n0];
nocheck:
++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
/*
* Remove an event from the PMU.
*/
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
cpuhw = &__get_cpu_var(cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
cpuhw->event[i-1] = cpuhw->event[i];
cpuhw->events[i-1] = cpuhw->events[i];
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
}
perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
if (event == cpuhw->limited_counter[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* POWER-PMU does not support disabling individual counters, hence we
* program their cycle counters to their max value and ignore the interrupts.
*/
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
power_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test; it will be performed at commit time
*/
void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 on success
*/
int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
if (!ppmu)
return -EAGAIN;
cpuhw = &__get_cpu_var(cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
if (i < 0)
return -EAGAIN;
for (i = cpuhw->n_txn_start; i < n; ++i)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
* An event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
if (event->attr.exclude_user
|| event->attr.exclude_kernel
|| event->attr.exclude_hv
|| event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
* The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
return 0;
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
return n > 0;
}
/*
* Find an alternative event_id that goes on a normal PMC, if possible,
* and return the event_id code, or 0 if there is no such alternative.
* (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
n = ppmu->get_alternatives(ev, flags, alt);
if (!n)
return 0;
return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
if (!ppmu)
return -ENOENT;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config_base = ev;
event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
* exclude_hv bit to 0 so that we don't care what
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
event->attr.exclude_hv = 0;
/*
* If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
* If this machine has limited events, check whether this
* event_id could go on a limited event.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
* The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
return -EINVAL;
}
}
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
event->destroy = hw_perf_event_destroy;
return err;
}
struct pmu power_pmu = {
.pmu_enable = power_pmu_enable,
.pmu_disable = power_pmu_disable,
.event_init = power_pmu_event_init,
.add = power_pmu_add,
.del = power_pmu_del,
.start = power_pmu_start,
.stop = power_pmu_stop,
.read = power_pmu_read,
.start_txn = power_pmu_start_txn,
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, ~0ULL);
data.period = event->hw.last_period;
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
}
/*
* Called from generic code to get the misc flags (i.e. processor mode)
* for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
u32 flags = perf_get_misc_flags(regs);
if (flags)
return flags;
return user_mode(regs) ? PERF_RECORD_MISC_USER :
PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
* for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
static bool pmc_overflow(unsigned long val)
{
if ((int)val < 0)
return true;
/*
* Events on POWER7 can roll back if a speculative event doesn't
* eventually complete. Unfortunately in some rare cases they will
* raise a performance monitor exception. We need to catch this to
* ensure we reset the PMC. In all cases the PMC will be 256 or less
* cycles from overflow.
*
* We only do this if the first pass fails to find any overflowing
* PMCs because a user might set a period of less than 256 and we
* don't want to mistakenly reset them.
*/
if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
return true;
return false;
}
/*
* Performance monitor interrupt stuff
*/
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < cpuhw->n_events; ++i) {
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
if ((int)val < 0) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
}
}
/*
* In case we didn't find and reset the event that caused
* the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
for (i = 0; i < ppmu->n_counter; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
if (pmc_overflow(val))
write_pmc(i + 1, 0);
}
}
/*
* Reset MMCR0 to its normal value. This will set PMXE and
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
if (nmi)
nmi_exit();
else
irq_exit();
}
static void power_pmu_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
power_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
int register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(power_pmu_notifier);
return 0;
}
|
195,972,410,588,859 | /*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
if (event->hw.sample_period) {
s64 left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
if (!(flags & PERF_EF_START)) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
}
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_pmu_enable(event->pmu);
return ret;
}
/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
fsl_emb_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return -EINVAL;
}
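/*
 * For example (assuming ppmu->n_restricted == 2): if the group already
 * contains two restricted events, num_restricted reaches 2 for a third
 * restricted event and the check above rejects it, since the new event
 * would need a third restricted-capable counter that does not exist.
 */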
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
return err;
}
static struct pmu fsl_emb_pmu = {
.pmu_enable = fsl_emb_pmu_enable,
.pmu_disable = fsl_emb_pmu_disable,
.event_init = fsl_emb_pmu_event_init,
.add = fsl_emb_pmu_add,
.del = fsl_emb_pmu_del,
.start = fsl_emb_pmu_start,
.stop = fsl_emb_pmu_stop,
.read = fsl_emb_pmu_read,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs, nmi);
} else {
/*
* Disabled counter is negative,
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
| /*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
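/*
* Read prev_count and the PMC, then publish the new value with cmpxchg;
* retry if an interrupt updated prev_count between the two reads.
*/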
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters from top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
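/*
* The PMCs count up and interrupt when bit 31 becomes set, so seeding
* a counter with 0x80000000 - left makes it overflow after "left"
* more events (see the (int)val < 0 test in perf_event_interrupt).
*/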
if (event->hw.sample_period) {
s64 left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
if (!(flags & PERF_EF_START)) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
}
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_pmu_enable(event->pmu);
return ret;
}
/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
fsl_emb_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
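/*
* num_events was 1: take the mutex so the final decrement and the
* hardware release cannot race with a concurrent reserve in
* fsl_emb_pmu_event_init().
*/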
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
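/*
* For example (illustrative): an L1-data read-access event arrives as
*	config = PERF_COUNT_HW_CACHE_L1D |
*		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
*		 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16);
* which the unpacking above maps to cache_events[L1D][READ][ACCESS].
*/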
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return -EINVAL;
}
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
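/* Freeze all counters (FAC) until events are actually added. */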
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
return err;
}
static struct pmu fsl_emb_pmu = {
.pmu_enable = fsl_emb_pmu_enable,
.pmu_disable = fsl_emb_pmu_disable,
.event_init = fsl_emb_pmu_event_init,
.add = fsl_emb_pmu_add,
.del = fsl_emb_pmu_del,
.start = fsl_emb_pmu_start,
.stop = fsl_emb_pmu_stop,
.read = fsl_emb_pmu_read,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
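/*
* Seed the counter so it overflows (bit 31 set) after "left" more
* events; the same computation is done in fsl_emb_pmu_add().
*/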
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
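/* A negative value as s32 means bit 31 is set: the counter wrapped. */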
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
} else {
/*
* Disabled counter is negative,
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
|
267,799,260,702,363 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* The parameter save area on the stack is used to store arguments being
* passed to the callee function and is located at a fixed offset from
* the stack pointer.
*/
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
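/*
* For example (illustrative): regs_query_register_offset("gpr3")
* returns offsetof(struct pt_regs, gpr[3]).
*/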
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/*
* Does not yet catch signals sent when the child dies;
* that is handled in exit.c or in signal.c.
*/
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif
/*
* Max register writeable via put_reg
*/
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG PT_MQ
#else
#define PT_MAX_PUT_REG PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
return task->thread.regs->msr | task->thread.fpexc_mode;
}
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
/*
* We prevent mucking around with the reserved area of trap
* which is used internally by the kernel.
*/
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
task->thread.regs->trap = trap & 0xfff0;
return 0;
}
/*
* Get contents of register REGNO in task TASK.
*/
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return get_user_msr(task);
if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
return ((unsigned long *)task->thread.regs)[regno];
return -EIO;
}
/*
* Write contents of register REGNO in task TASK.
*/
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int i, ret;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
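/*
* Copy out in pieces so the MSR slot can be synthesized via
* get_user_msr(), which folds in fpexc_mode, instead of being taken
* verbatim from the saved registers.
*/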
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
* The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
* This definition of the VMX state is compatible with the current PPC32
* ptrace interface. This allows signal handling and ptrace to use the
* same structures. This also simplifies the implementation of a bi-arch
* (combined 32- and 64-bit) gdb.
*/
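/*
* Illustrative user-space view of the transfer (not a real kernel type):
*
*	struct {
*		vector128 vr[32];	(quadwords 0-31)
*		vector128 vscr_q;	(vscr in the last 32-bit word)
*		vector128 vrsave_q;	(vrsave in the first 32-bit word)
*	};
*/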
static int vr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_altivec_to_thread(target);
return target->thread.used_vr ? regset->n : 0;
}
static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0, 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
* Currently, to set and get all the VSX state, you need to call the
* FP and VMX calls as well.  This only gets/sets the lower 32
* 128-bit VSX registers.
*/
static int vsr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
double buf[32];
int ret, i;
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
double buf[32];
int ret, i;
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
static int evr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_spe_to_thread(target);
return target->thread.used_spe ? regset->n : 0;
}
static int evr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
static int evr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
#endif /* CONFIG_SPE */
/*
* These are our native regset flavors.
*/
enum powerpc_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_ALTIVEC
REGSET_VMX,
#endif
#ifdef CONFIG_VSX
REGSET_VSX,
#endif
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
};
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#ifdef CONFIG_PPC64
#include <linux/compat.h>
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *regs = &target->thread.regs->gpr[0];
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
pos /= sizeof(reg);
count /= sizeof(reg);
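/*
* Walk the 32-bit regset image one compat_ulong_t at a time; the MSR
* and TRAP slots are filtered through set_user_msr()/set_user_trap()
* rather than being stored directly.
*/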
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
regs[pos++] = *k++;
else
for (; count > 0 && pos < PT_MSR; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
if (count > 0 && pos == PT_MSR) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_msr(target, reg);
++pos;
--count;
}
if (kbuf) {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
regs[pos++] = *k++;
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
++k;
} else {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
if (__get_user(reg, u++))
return -EFAULT;
}
if (count > 0 && pos == PT_TRAP) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_trap(target, reg);
++pos;
--count;
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.get = gpr32_get, .set = gpr32_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_ppc_compat_view;
#endif
return &user_ppc_native_view;
}
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_BT;
task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_IC;
task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The logic to disable single stepping should be as
* simple as turning off the Instruction Complete flag.
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
task->thread.dbcr0 &= ~DBCR0_IC;
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
/*
* All debug events were off.....
*/
task->thread.dbcr0 &= ~DBCR0_IDM;
regs->msr &= ~MSR_DE;
}
#else
regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
* We don't have to do anything about that here.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &(task->thread);
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IACs at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
* passed together with the data address, fitting the design of the
* DABR register, as follows:
*
* bit 0: Read flag
* bit 1: Write flag
* bit 2: Breakpoint translation
*
* Thus, we use them here accordingly.
*/
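/*
* For example (illustrative): a write watchpoint at "addr" would be
* requested as data = (addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE.
*/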
/* Ensure breakpoint translation bit is set */
if (data && !(data & DABR_TRANSLATION))
return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(task) < 0)
return -ESRCH;
bp = thread->ptrace_bps[0];
if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
ptrace_put_breakpoints(task);
return 0;
}
if (bp) {
attr = bp->attr;
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data &
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret) {
ptrace_put_breakpoints(task);
return ret;
}
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
return PTR_ERR(bp);
}
ptrace_put_breakpoints(task);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */
task->thread.dabr = data;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
* so as not to cause alignment restrictions for DAC-based processors.
*/
/* DAC's hold the whole address without any mode flags */
task->thread.dac1 = data & ~0x3UL;
if (task->thread.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
task->thread.regs->msr &= ~MSR_DE;
task->thread.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
/* Read or Write bits must be set */
if (!(data & 0x3UL))
return -EINVAL;
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
register */
task->thread.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0
accordingly */
dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
if (data & 0x1UL)
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
return 0;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
slot4_in_use = 1;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
/* Make sure range is valid. */
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
/* We need a pair of IAC registers */
if ((!slot1_in_use) && (!slot2_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.iac2 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
else
dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.iac4 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
else
dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
} else
return -ENOSPC;
} else {
/* We only need one. If possible, leave a pair free in
* case a range is needed later
*/
if (!slot1_in_use) {
/*
* Don't use iac1 if iac1-iac2 are free and either
* iac3 or iac4 (but not both) are free
*/
if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
child->thread.iac2 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
child->thread.iac4 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC4;
#endif
} else
return -ENOSPC;
}
out:
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
child->thread.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
child->thread.iac1 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
child->thread.iac2 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
child->thread.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
child->thread.iac3 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
child->thread.iac4 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int byte_enable =
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
& 0xf;
int condition_mode =
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
int slot;
if (byte_enable && (condition_mode == 0))
return -EINVAL;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
slot = 1;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
child->thread.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc1 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
slot = 2;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
child->thread.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc2 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else
return -ENOSPC;
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
child->thread.dac2 = 0;
child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc2 = 0;
#endif
child->thread.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
/* We don't allow range watchpoints to be used with DVC */
if (bp_info->condition_mode)
return -EINVAL;
/*
* Best effort to verify the address range. The user/supervisor bits
* prevent trapping in kernel space, but let's fail on an obvious bad
* range. The simple test on the mask is not fool-proof, and any
* exclusive range will spill over into kernel space.
*/
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (mode == PPC_BREAKPOINT_MODE_MASK) {
/*
* dac2 is a bitmask. Don't allow a mask that makes a
* kernel space address from a valid dac1 value
*/
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
return -EIO;
} else {
/*
* For range breakpoints, addr2 must also be a valid address
*/
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
}
if (child->thread.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
child->thread.dac1 = bp_info->addr;
child->thread.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.dbcr2 |= DBCR2_DAC12MM;
child->thread.regs->msr |= MSR_DE;
return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr;
#endif
if (bp_info->version != 1)
return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Check for invalid flags and combinations
*/
if ((bp_info->trigger_type == 0) ||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
PPC_BREAKPOINT_TRIGGER_RW)) ||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
(bp_info->condition_mode &
~(PPC_BREAKPOINT_CONDITION_MODE |
PPC_BREAKPOINT_CONDITION_BE_ALL)))
return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
#endif
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
return -EINVAL;
return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
return set_dac_range(child, bp_info);
#else
return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
/*
* We only support one data breakpoint
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if (child->thread.dabr)
return -ENOSPC;
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
dabr = (unsigned long)bp_info->addr & ~7UL;
dabr |= DABR_TRANSLATION;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dabr |= DABR_DATA_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dabr |= DABR_DATA_WRITE;
child->thread.dabr = dabr;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
int rc;
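/*
* Slots 1-4 are IAC (instruction) breakpoints; 5 and up are DAC
* (data) breakpoints, offset by 4 (see set_dac() and set_dac_range()).
*/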
if (data <= 4)
rc = del_instruction_bp(child, (int)data);
else
rc = del_dac(child, (int)data - 4);
if (!rc) {
if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
child->thread.dbcr1)) {
child->thread.dbcr0 &= ~DBCR0_IDM;
child->thread.regs->msr &= ~MSR_DE;
}
}
return rc;
#else
if (data != 1)
return -EINVAL;
if (child->thread.dabr == 0)
return -ENOENT;
child->thread.dabr = 0;
return 0;
#endif
}
/*
* Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
* we mark them as obsolete now, they will be removed in a future version
*/
static long arch_ptrace_old(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
void __user *datavp = (void __user *) data;
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
}
return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EPERM;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
tmp = ptrace_get_reg(child, (int) index);
} else {
flush_fp_to_thread(child);
tmp = ((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)];
}
ret = put_user(tmp, datalp);
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)] = data;
ret = 0;
}
break;
}
case PPC_PTRACE_GETHWDBGINFO: {
struct ppc_debug_info dbginfo;
dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo.data_bp_alignment = 4;
dbginfo.sizeof_condition = 4;
dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
dbginfo.features |=
PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
dbginfo.num_instruction_bps = 0;
dbginfo.num_data_bps = 1;
dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
dbginfo.data_bp_alignment = 8;
#else
dbginfo.data_bp_alignment = 4;
#endif
dbginfo.sizeof_condition = 0;
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
ret = __copy_to_user(datavp, &dbginfo,
sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
ret = ppc_set_hwdebug(child, &bp_info);
break;
}
case PPC_PTRACE_DELHWDEBUG: {
ret = ppc_del_hwdebug(child, addr, data);
break;
}
case PTRACE_GET_DEBUGREG: {
ret = -EINVAL;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, datalp);
#else
ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
case PTRACE_SET_DEBUGREG:
ret = ptrace_set_debugreg(child, addr, data);
break;
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
#endif
/* Old reverse args ptrace calls */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
ret = arch_ptrace_old(child, request, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->gpr[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
if (!is_32bit_task())
audit_syscall_entry(AUDIT_ARCH_PPC64,
regs->gpr[0],
regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
else
#endif
audit_syscall_entry(AUDIT_ARCH_PPC,
regs->gpr[0],
regs->gpr[3] & 0xffffffff,
regs->gpr[4] & 0xffffffff,
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
}
return ret ?: regs->gpr[0];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit((regs->ccr & 0x10000000) ? AUDITSC_FAILURE : AUDITSC_SUCCESS,
regs->result);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
| /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* The parameter save area on the stack is used to store arguments being passed
* to callee function and is located at fixed offset from stack pointer.
*/
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/*
* Does not yet catch signals sent when the child dies;
* that is handled in exit.c or in signal.c.
*/
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif
/*
* Max register writeable via put_reg
*/
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG PT_MQ
#else
#define PT_MAX_PUT_REG PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
return task->thread.regs->msr | task->thread.fpexc_mode;
}
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
/*
* We prevent mucking around with the reserved area of trap
* which are used internally by the kernel.
*/
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
task->thread.regs->trap = trap & 0xfff0;
return 0;
}
/*
* Get contents of register REGNO in task TASK.
*/
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return get_user_msr(task);
if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
return ((unsigned long *)task->thread.regs)[regno];
return -EIO;
}
/*
* Write contents of register REGNO in task TASK.
*/
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
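/*
 * Illustrative sketch, not part of the original file: the userspace side
 * served by the two accessors above. The PTRACE_PEEKUSR/PTRACE_POKEUSR
 * requests handled in arch_ptrace() below take an offset of
 * regno * sizeof(long) and convert it back into the regno passed to
 * ptrace_get_reg()/ptrace_put_reg(). PT_NIP and PT_R3 come from
 * <asm/ptrace.h>; glibc spells the requests PTRACE_PEEKUSER/PTRACE_POKEUSER.
 * pid is assumed to be an attached, stopped tracee and new_r3 an arbitrary
 * value.
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *
 *	errno = 0;	// PEEKUSER returns -1 on error, so check errno
 *	long nip = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_NIP * sizeof(long)), NULL);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       (void *)(PT_R3 * sizeof(long)), (void *)new_r3);
 */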
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int i, ret;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
* The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
* This definition of the VMX state is compatible with the current PPC32
* ptrace interface. This allows signal handling and ptrace to use the
* same structures. This also simplifies the implementation of a bi-arch
* (combined 32- and 64-bit) gdb.
*/
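/*
 * Illustrative sketch, not part of the original file: the 34-quadword
 * layout described above, roughly as a tracer might declare it. The struct
 * name and the use of byte arrays for quadwords are assumptions; the raw
 * PTRACE_GETVRREGS transfer in arch_ptrace() below is slightly smaller
 * (33 * 16 + 4 bytes) because vrsave is passed as a bare 32-bit word.
 *
 *	struct ppc_vmx_dump {
 *		unsigned char vr[32][16];	// quadwords 0-31: vr0..vr31
 *		unsigned char vscr_pad[12];	// quadword 32 ...
 *		unsigned int  vscr;		// ... vscr in the last word
 *		unsigned int  vrsave;		// quadword 33, first word
 *		unsigned char vrsave_pad[12];
 *	};
 */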
static int vr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_altivec_to_thread(target);
return target->thread.used_vr ? regset->n : 0;
}
static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0, 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
* Currently, to set and get all the VSX state, you need to call
* the FP and VMX calls as well. This only gets/sets the lower 32
* 128-bit VSX registers.
*/
static int vsr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
double buf[32];
int ret, i;
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
double buf[32];
int ret,i;
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
static int evr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_spe_to_thread(target);
return target->thread.used_spe ? regset->n : 0;
}
static int evr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
static int evr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
#endif /* CONFIG_SPE */
/*
* These are our native regset flavors.
*/
enum powerpc_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_ALTIVEC
REGSET_VMX,
#endif
#ifdef CONFIG_VSX
REGSET_VSX,
#endif
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
};
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
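/*
 * Illustrative sketch, not part of the original file: besides the explicit
 * requests in arch_ptrace() below, these regsets are reached through the
 * generic PTRACE_GETREGSET/PTRACE_SETREGSET path and through ELF core
 * dumps, selected by core_note_type. A tracer could fetch the GPR set like
 * this (pid is assumed to be a stopped tracee; PTRACE_GETREGSET needs a
 * reasonably recent kernel and libc):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *	#include <asm/ptrace.h>
 *
 *	struct pt_regs gprs;
 *	struct iovec iov = { .iov_base = &gprs, .iov_len = sizeof(gprs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */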
#ifdef CONFIG_PPC64
#include <linux/compat.h>
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *regs = &target->thread.regs->gpr[0];
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
regs[pos++] = *k++;
else
for (; count > 0 && pos < PT_MSR; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
if (count > 0 && pos == PT_MSR) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_msr(target, reg);
++pos;
--count;
}
if (kbuf) {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
regs[pos++] = *k++;
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
++k;
} else {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
if (__get_user(reg, u++))
return -EFAULT;
}
if (count > 0 && pos == PT_TRAP) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_trap(target, reg);
++pos;
--count;
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.get = gpr32_get, .set = gpr32_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_ppc_compat_view;
#endif
return &user_ppc_native_view;
}
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_BT;
task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_IC;
task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The logic to disable single stepping should be as
* simple as turning off the Instruction Complete flag.
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
task->thread.dbcr0 &= ~DBCR0_IC;
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
/*
* All debug events were off.....
*/
task->thread.dbcr0 &= ~DBCR0_IDM;
regs->msr &= ~MSR_DE;
}
#else
regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
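/*
 * Illustrative sketch, not part of the original file: tracers do not call
 * the three helpers above directly; generic ptrace code invokes them when
 * the child is resumed, roughly as follows (pid is an assumed tracee):
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL); // user_enable_single_step()
 *	waitpid(pid, &status, 0);                   // child stops after one insn
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);       // user_disable_single_step()
 */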
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
* We don't have to do anything about that here
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &(task->thread);
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IACs at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
* passed together with the data address, fitting the design of the
* DABR register, as follows:
*
* bit 0: Read flag
* bit 1: Write flag
* bit 2: Breakpoint translation
*
* Thus, we use them here in the same way.
*/
/* Ensure breakpoint translation bit is set */
if (data && !(data & DABR_TRANSLATION))
return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(task) < 0)
return -ESRCH;
bp = thread->ptrace_bps[0];
if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
ptrace_put_breakpoints(task);
return 0;
}
if (bp) {
attr = bp->attr;
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data &
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret) {
ptrace_put_breakpoints(task);
return ret;
}
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
return PTR_ERR(bp);
}
ptrace_put_breakpoints(task);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */
task->thread.dabr = data;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
* so as not to cause alignment restrictions for DAC-based processors.
*/
/* DAC's hold the whole address without any mode flags */
task->thread.dac1 = data & ~0x3UL;
if (task->thread.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
task->thread.regs->msr &= ~MSR_DE;
task->thread.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
/* Read or Write bits must be set */
if (!(data & 0x3UL))
return -EINVAL;
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
register */
task->thread.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0
accordingly */
dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
if (data & 0x1UL)
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
return 0;
}
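/*
 * Illustrative sketch, not part of the original file: arming the single
 * data breakpoint handled above from a tracer on a classic (DABR) CPU. The
 * low three bits carry the flags decoded earlier (bit 0 read, bit 1 write,
 * bit 2 translation); watch_addr and pid are assumptions for this example.
 *
 *	unsigned long dabr = (watch_addr & ~7UL)
 *			   | 0x4	// breakpoint translation (required)
 *			   | 0x2;	// trap on writes
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, (void *)dabr);
 *	// passing 0 as data clears the breakpoint again
 */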
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
slot4_in_use = 1;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
/* Make sure range is valid. */
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
/* We need a pair of IAC registers */
if ((!slot1_in_use) && (!slot2_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.iac2 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
else
dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.iac4 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
else
dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
} else
return -ENOSPC;
} else {
/* We only need one. If possible leave a pair free in
* case a range is needed later
*/
if (!slot1_in_use) {
/*
* Don't use iac1 if iac1-iac2 are free and either
* iac3 or iac4 (but not both) are free
*/
if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
child->thread.iac2 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
child->thread.iac4 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC4;
#endif
} else
return -ENOSPC;
}
out:
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
child->thread.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
child->thread.iac1 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
child->thread.iac2 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
child->thread.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
child->thread.iac3 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
child->thread.iac4 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int byte_enable =
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
& 0xf;
int condition_mode =
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
int slot;
if (byte_enable && (condition_mode == 0))
return -EINVAL;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
slot = 1;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
child->thread.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc1 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
slot = 2;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
child->thread.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc2 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else
return -ENOSPC;
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
child->thread.dac2 = 0;
child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc2 = 0;
#endif
child->thread.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
/* We don't allow range watchpoints to be used with DVC */
if (bp_info->condition_mode)
return -EINVAL;
/*
* Best effort to verify the address range. The user/supervisor bits
* prevent trapping in kernel space, but let's fail on an obvious bad
* range. The simple test on the mask is not fool-proof, and any
* exclusive range will spill over into kernel space.
*/
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (mode == PPC_BREAKPOINT_MODE_MASK) {
/*
* dac2 is a bitmask. Don't allow a mask that makes a
* kernel space address from a valid dac1 value
*/
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
return -EIO;
} else {
/*
* For range breakpoints, addr2 must also be a valid address
*/
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
}
if (child->thread.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
child->thread.dac1 = bp_info->addr;
child->thread.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.dbcr2 |= DBCR2_DAC12MM;
child->thread.regs->msr |= MSR_DE;
return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr;
#endif
if (bp_info->version != 1)
return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Check for invalid flags and combinations
*/
if ((bp_info->trigger_type == 0) ||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
PPC_BREAKPOINT_TRIGGER_RW)) ||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
(bp_info->condition_mode &
~(PPC_BREAKPOINT_CONDITION_MODE |
PPC_BREAKPOINT_CONDITION_BE_ALL)))
return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
#endif
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
return -EINVAL;
return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
return set_dac_range(child, bp_info);
#else
return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
/*
* We only support one data breakpoint
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if (child->thread.dabr)
return -ENOSPC;
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
dabr = (unsigned long)bp_info->addr & ~7UL;
dabr |= DABR_TRANSLATION;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dabr |= DABR_DATA_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dabr |= DABR_DATA_WRITE;
child->thread.dabr = dabr;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
}
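/*
 * Illustrative sketch, not part of the original file: a tracer requesting
 * the one exact-address write watchpoint this helper supports on classic
 * (DABR) CPUs. struct ppc_hw_breakpoint and the PPC_* constants come from
 * <asm/ptrace.h>; pid and watch_addr are assumptions. The returned value is
 * the slot handle later passed to PPC_PTRACE_DELHWDEBUG.
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = watch_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot);
 */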
static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
int rc;
if (data <= 4)
rc = del_instruction_bp(child, (int)data);
else
rc = del_dac(child, (int)data - 4);
if (!rc) {
if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
child->thread.dbcr1)) {
child->thread.dbcr0 &= ~DBCR0_IDM;
child->thread.regs->msr &= ~MSR_DE;
}
}
return rc;
#else
if (data != 1)
return -EINVAL;
if (child->thread.dabr == 0)
return -ENOENT;
child->thread.dabr = 0;
return 0;
#endif
}
/*
* Here are the old "legacy" powerpc-specific getregs/setregs ptrace calls;
* they are obsolete now and will be removed in a future version
*/
static long arch_ptrace_old(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
void __user *datavp = (void __user *) data;
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
}
return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EPERM;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
tmp = ptrace_get_reg(child, (int) index);
} else {
flush_fp_to_thread(child);
tmp = ((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)];
}
ret = put_user(tmp, datalp);
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
((unsigned long *)child->thread.fpr)
[TS_FPRWIDTH * (index - PT_FPR0)] = data;
ret = 0;
}
break;
}
case PPC_PTRACE_GETHWDBGINFO: {
struct ppc_debug_info dbginfo;
dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo.data_bp_alignment = 4;
dbginfo.sizeof_condition = 4;
dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
dbginfo.features |=
PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
dbginfo.num_instruction_bps = 0;
dbginfo.num_data_bps = 1;
dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
dbginfo.data_bp_alignment = 8;
#else
dbginfo.data_bp_alignment = 4;
#endif
dbginfo.sizeof_condition = 0;
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
ret = __copy_to_user(datavp, &dbginfo,
sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
ret = ppc_set_hwdebug(child, &bp_info);
break;
}
case PPC_PTRACE_DELHWDEBUG: {
ret = ppc_del_hwdebug(child, addr, data);
break;
}
case PTRACE_GET_DEBUGREG: {
ret = -EINVAL;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, datalp);
#else
ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
case PTRACE_SET_DEBUGREG:
ret = ptrace_set_debugreg(child, addr, data);
break;
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
#endif
/* Old reverse args ptrace calls */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
ret = arch_ptrace_old(child, request, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->gpr[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
if (!is_32bit_task())
audit_syscall_entry(AUDIT_ARCH_PPC64,
regs->gpr[0],
regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
else
#endif
audit_syscall_entry(AUDIT_ARCH_PPC,
regs->gpr[0],
regs->gpr[3] & 0xffffffff,
regs->gpr[4] & 0xffffffff,
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
}
return ret ?: regs->gpr[0];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
regs->result);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
|
53,314,898,618,497 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
return 0;
}
#endif
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
return 0;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
case 39: /* stbu */
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
return 1;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
/* check minor opcode */
switch ((inst >> 1) & 0x3ff) {
case 181: /* stdux */
case 183: /* stwux */
case 247: /* stbux */
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
return 1;
}
}
return 0;
}
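/*
 * Illustrative sketch, not part of the original file: the field layout the
 * checks above rely on, using "stwu r3,-16(r1)" (0x9461fff0) as an example.
 * The primary opcode sits in the top 6 bits of the instruction word and the
 * rA field in the next 5 bits below the rS field:
 *
 *	unsigned int inst = 0x9461fff0;
 *	(inst >> 26) == 37;		// primary opcode: stwu
 *	((inst >> 16) & 0x1f) == 1;	// rA == 1, so r1 (the stack pointer) is updated
 */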
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
* the error_code parameter is ESR for a data fault, 0 for an instruction
* fault.
* For 64-bit processors, the error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 any SLB fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = 0, ret;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
#else
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
if (notify_page_fault(regs))
return 0;
if (unlikely(debugger_fault_handler(regs)))
return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
#endif
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->gpr[1]
&& (!user_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
if (error_code & 0x95700000)
/* an error such as lwarx to I/O controller space,
address matching DABR, eciwx, etc. */
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
/* The 8xx sometimes needs to load invalid/non-present TLB entries.
* These must be invalidated separately, as the Linux mm doesn't do it.
*/
if (error_code & 0x40000000) /* no translation? */
_tlbil_va(address, 0, 0, 0);
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
if (error_code & 0x10000000)
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
/* Protection fault on exec goes straight to failure on
* Hash based MMUs as they either don't support per-page
* execute permission, or if they do, it's handled already
* at the hash level. This test would probably have to
* be removed if we change the way this works to make hash
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
if (error_code & DSISR_PROTFAULT)
goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
*
* Note: That code used to not be enabled for 4xx/BookE.
* It is now as I/D cache coherency for these is done at
* set_pte_at() time and I see no reason why the test
* below wouldn't be valid on those processors. This -may-
* break programs compiled with a really old ABI though.
*/
if (!(vma->vm_flags & VM_EXEC) &&
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
else if (ret & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
get_lppaca()->page_ins += (1 << PAGE_FACTOR);
preempt_enable();
}
#endif
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return 0;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
return 0;
}
if (is_exec && (error_code & DSISR_PROTFAULT)
&& printk_ratelimit())
printk(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
address, current_uid());
return SIGSEGV;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
return SIGKILL;
pagefault_out_of_memory();
return 0;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return 0;
}
return SIGBUS;
}
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = entry->fixup;
return;
}
/* kernel has accessed a bad area */
switch (regs->trap) {
case 0x300:
case 0x380:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"data at address 0x%08lx\n", regs->dar);
break;
case 0x400:
case 0x480:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
stackend = end_of_stack(current);
if (current != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
}
| /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
return 0;
}
#endif
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
return 0;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
case 39: /* stbu */
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
return 1;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
/* check minor opcode */
switch ((inst >> 1) & 0x3ff) {
case 181: /* stdux */
case 183: /* stwux */
case 247: /* stbux */
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
return 1;
}
}
return 0;
}
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
* the error_code parameter is ESR for a data fault, 0 for an instruction
* fault.
* For 64-bit processors, the error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 any SLB fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = 0, ret;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
#else
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
if (notify_page_fault(regs))
return 0;
if (unlikely(debugger_fault_handler(regs)))
return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
#endif
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->gpr[1]
&& (!user_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
if (error_code & 0x95700000)
/* an error such as lwarx to I/O controller space,
address matching DABR, eciwx, etc. */
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
/* The 8xx sometimes needs to load invalid/non-present TLB entries.
* These must be invalidated separately, as the Linux mm doesn't.
*/
if (error_code & 0x40000000) /* no translation? */
_tlbil_va(address, 0, 0, 0);
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
if (error_code & 0x10000000)
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
/* A protection fault on exec goes straight to failure on
* hash-based MMUs, as they either don't support per-page
* execute permission, or if they do, it's handled already
* at the hash level. This test would probably have to
* be removed if we change the way this works to make hash
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
if (error_code & DSISR_PROTFAULT)
goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
*
* Note: That code used to not be enabled for 4xx/BookE.
* It is now as I/D cache coherency for these is done at
* set_pte_at() time and I see no reason why the test
* below wouldn't be valid on those processors. This -may-
* break programs compiled with a really old ABI though.
*/
if (!(vma->vm_flags & VM_EXEC) &&
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
else if (ret & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
get_lppaca()->page_ins += (1 << PAGE_FACTOR);
preempt_enable();
}
#endif
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
up_read(&mm->mmap_sem);
return 0;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
return 0;
}
if (is_exec && (error_code & DSISR_PROTFAULT)
&& printk_ratelimit())
printk(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
address, current_uid());
return SIGSEGV;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
return SIGKILL;
pagefault_out_of_memory();
return 0;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return 0;
}
return SIGBUS;
}
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = entry->fixup;
return;
}
/* kernel has accessed a bad area */
switch (regs->trap) {
case 0x300:
case 0x380:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"data at address 0x%08lx\n", regs->dar);
break;
case 0x400:
case 0x480:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
stackend = end_of_stack(current);
if (current != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
}
|
137,922,171,175,431 | /*
* arch/s390/mm/fault.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/compat.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
static unsigned long store_indication;
void fault_init(void)
{
if (test_facility(2) && test_facility(75))
store_indication = 0xc00;
}
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Unlock any spinlocks which will prevent us from getting the
* message out.
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
int loglevel_save = console_loglevel;
console_unblank();
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15;
printk(" ");
console_loglevel = loglevel_save;
}
}
/*
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
static inline int user_space_fault(unsigned long trans_exc_code)
{
/*
* The lowest two bits of the translation exception
* identification indicate which paging table was used.
*/
trans_exc_code &= 3;
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
if (user_mode == HOME_SPACE_MODE)
/* User space if the access has been done via home space. */
return trans_exc_code == 3;
/*
* If the user space is not the home space the kernel runs in home
* space. Access via secondary space has already been covered,
* access via primary space or access register is from user space
* and access via home space is from the kernel.
*/
return trans_exc_code != 3;
}
static inline void report_user_fault(struct pt_regs *regs, long int_code,
int signr, unsigned long address)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk("User process fault: interruption code 0x%lX ", int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
printk("failing address: %lX\n", address);
show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
int si_code, unsigned long trans_exc_code)
{
struct siginfo si;
unsigned long address;
address = trans_exc_code & __FAIL_ADDR_MASK;
current->thread.prot_addr = address;
current->thread.trap_no = int_code;
report_user_fault(regs, int_code, SIGSEGV, address);
si.si_signo = SIGSEGV;
si.si_code = si_code;
si.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &si, current);
}
static noinline void do_no_context(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
const struct exception_table_entry *fixup;
unsigned long address;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
address = trans_exc_code & __FAIL_ADDR_MASK;
if (!user_space_fault(trans_exc_code))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
die("Oops", regs, int_code);
do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
/* A low-address protection hit in kernel mode means a
NULL pointer write access. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
die ("Low-address protection", regs, int_code);
do_exit(SIGKILL);
}
do_no_context(regs, int_code, trans_exc_code);
}
static noinline void do_sigbus(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
struct task_struct *tsk = current;
unsigned long address;
struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
address = trans_exc_code & __FAIL_ADDR_MASK;
tsk->thread.prot_addr = address;
tsk->thread.trap_no = int_code;
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
si.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &si, tsk);
}
static noinline void do_fault_error(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, int_code, si_code, trans_exc_code);
return;
}
case VM_FAULT_BADCONTEXT:
do_no_context(regs, int_code, trans_exc_code);
break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
else
do_sigbus(regs, int_code, trans_exc_code);
} else
BUG();
break;
}
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* interruption code (int_code):
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
static inline int do_exception(struct pt_regs *regs, int access,
unsigned long trans_exc_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
unsigned int flags;
int fault;
if (notify_page_fault(regs))
return 0;
tsk = current;
mm = tsk->mm;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
if (expand_stack(vma, address))
goto out_up;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
out:
return fault;
}
void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
int fault;
/* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (pgm_int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
do_low_address(regs, pgm_int_code, trans_exc_code);
return;
}
fault = do_exception(regs, VM_WRITE, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, 4, trans_exc_code, fault);
}
void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
}
#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
up_read(&mm->mmap_sem);
if (vma) {
update_mm(mm, current);
return;
}
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
return;
}
no_context:
do_no_context(regs, pgm_int_code, trans_exc_code);
}
#endif
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
regs.psw.mask = psw_kernel_bits;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
uaddr &= PAGE_MASK;
access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access, uaddr | 2);
if (unlikely(fault)) {
if (fault & VM_FAULT_OOM)
return -EFAULT;
else if (fault & VM_FAULT_SIGBUS)
do_sigbus(&regs, pgm_int_code, uaddr);
}
return fault ? -EFAULT : 0;
}
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page fault routines.
*/
static int pfault_disable;
static int __init nopfault(char *str)
{
pfault_disable = 1;
return 1;
}
__setup("nopfault", nopfault);
struct pfault_refbk {
u16 refdiagc;
u16 reffcode;
u16 refdwlen;
u16 refversn;
u64 refgaddr;
u64 refselmk;
u64 refcmpmk;
u64 reserved;
} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_CURRENT_PID,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD };
int rc;
if (!MACHINE_IS_VM || pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
"0: j 2f\n"
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
return rc;
}
void pfault_fini(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
if (!MACHINE_IS_VM || pfault_disable)
return;
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
static void pfault_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
pid_t pid;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
*/
subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
if (subcode & 0x0080) {
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
if (!tsk)
return;
} else {
tsk = current;
}
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
list_del(&tsk->thread.list);
wake_up_process(tsk);
} else {
/* Completion interrupt was faster than initial
* interrupt. Set pfault_wait to -1 so the initial
* interrupt doesn't put the task to sleep. */
tsk->thread.pfault_wait = -1;
}
put_task_struct(tsk);
} else {
/* signal bit not set -> a real page is missing. */
if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
* back to zero and exit. */
tsk->thread.pfault_wait = 0;
} else {
/* Initial interrupt arrived before completion
* interrupt. Let the task sleep. */
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
spin_unlock(&pfault_lock);
}
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
}
spin_unlock_irq(&pfault_lock);
break;
default:
break;
}
return NOTIFY_OK;
}
static int __init pfault_irq_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return 0;
rc = register_external_interrupt(0x2603, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
service_subclass_irq_register();
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
| /*
* arch/s390/mm/fault.c
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/compat.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
static unsigned long store_indication;
void fault_init(void)
{
if (test_facility(2) && test_facility(75))
store_indication = 0xc00;
}
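/*
 * Note (added commentary): when the store-indication facility is
 * available, the bits selected by the 0xc00 mask in the translation-
 * exception code report the access type; do_exception() below treats
 * the value 0x400 in that field as a store and sets FAULT_FLAG_WRITE.
 */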
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Unlock any spinlocks which will prevent us from getting the
* message out.
*/
void bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
int loglevel_save = console_loglevel;
console_unblank();
oops_in_progress = 0;
/*
* OK, the message is on the console. Now we call printk()
* without oops_in_progress set so that printk will give klogd
* a poke. Hold onto your hats...
*/
console_loglevel = 15;
printk(" ");
console_loglevel = loglevel_save;
}
}
/*
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
static inline int user_space_fault(unsigned long trans_exc_code)
{
/*
* The lowest two bits of the translation exception
* identification indicate which paging table was used.
*/
trans_exc_code &= 3;
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
if (user_mode == HOME_SPACE_MODE)
/* User space if the access has been done via home space. */
return trans_exc_code == 3;
/*
* If the user space is not the home space the kernel runs in home
* space. Access via secondary space has already been covered,
* access via primary space or access register is from user space
* and access via home space is from the kernel.
*/
return trans_exc_code != 3;
}
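/*
 * Summary of the cases above: 0 (primary space) and 1 (access
 * register) are user space, 2 (secondary space) depends on the
 * current set_fs() setting, and 3 (home space) is user space only
 * when user processes actually run in home space.
 */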
static inline void report_user_fault(struct pt_regs *regs, long int_code,
int signr, unsigned long address)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
return;
if (!unhandled_signal(current, signr))
return;
if (!printk_ratelimit())
return;
printk("User process fault: interruption code 0x%lX ", int_code);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk("\n");
printk("failing address: %lX\n", address);
show_regs(regs);
}
/*
* Send SIGSEGV to task. This is an external routine
* to keep the stack usage of do_page_fault small.
*/
static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
int si_code, unsigned long trans_exc_code)
{
struct siginfo si;
unsigned long address;
address = trans_exc_code & __FAIL_ADDR_MASK;
current->thread.prot_addr = address;
current->thread.trap_no = int_code;
report_user_fault(regs, int_code, SIGSEGV, address);
si.si_signo = SIGSEGV;
si.si_code = si_code;
si.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &si, current);
}
static noinline void do_no_context(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
const struct exception_table_entry *fixup;
unsigned long address;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
address = trans_exc_code & __FAIL_ADDR_MASK;
if (!user_space_fault(trans_exc_code))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %p\n", (void *)address);
die("Oops", regs, int_code);
do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
/* A low-address protection hit in kernel mode means a
NULL pointer write access. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* Low-address protection hit in user mode 'cannot happen'. */
die ("Low-address protection", regs, int_code);
do_exit(SIGKILL);
}
do_no_context(regs, int_code, trans_exc_code);
}
static noinline void do_sigbus(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code)
{
struct task_struct *tsk = current;
unsigned long address;
struct siginfo si;
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
address = trans_exc_code & __FAIL_ADDR_MASK;
tsk->thread.prot_addr = address;
tsk->thread.trap_no = int_code;
si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRERR;
si.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &si, tsk);
}
static noinline void do_fault_error(struct pt_regs *regs, long int_code,
unsigned long trans_exc_code, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (regs->psw.mask & PSW_MASK_PSTATE) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, int_code, si_code, trans_exc_code);
return;
}
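/* Kernel-mode bad access: fall through to the no-context path. */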
case VM_FAULT_BADCONTEXT:
do_no_context(regs, int_code, trans_exc_code);
break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
do_no_context(regs, int_code, trans_exc_code);
else
do_sigbus(regs, int_code, trans_exc_code);
} else
BUG();
break;
}
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* interruption code (int_code):
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
static inline int do_exception(struct pt_regs *regs, int access,
unsigned long trans_exc_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
unsigned int flags;
int fault;
if (notify_page_fault(regs))
return 0;
tsk = current;
mm = tsk->mm;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
if (expand_stack(vma, address))
goto out_up;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
out:
return fault;
}
void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
int fault;
/* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (pgm_int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
* field is not guaranteed to contain valid data in this case.
*/
if (unlikely(!(trans_exc_code & 4))) {
do_low_address(regs, pgm_int_code, trans_exc_code);
return;
}
fault = do_exception(regs, VM_WRITE, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, 4, trans_exc_code, fault);
}
void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
int access, fault;
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access, trans_exc_code);
if (unlikely(fault))
do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
}
#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
unsigned long trans_exc_code)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
up_read(&mm->mmap_sem);
if (vma) {
update_mm(mm, current);
return;
}
/* User mode accesses just cause a SIGSEGV */
if (regs->psw.mask & PSW_MASK_PSTATE) {
do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
return;
}
no_context:
do_no_context(regs, pgm_int_code, trans_exc_code);
}
#endif
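/*
 * Added commentary: __handle_fault() emulates a page fault on behalf
 * of kernel code accessing user space. It builds a minimal pt_regs so
 * that do_exception() can resolve the user address, and maps any
 * remaining failure to -EFAULT for the caller.
 */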
int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
struct pt_regs regs;
int access, fault;
regs.psw.mask = psw_kernel_bits;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) __builtin_return_address(0);
regs.psw.addr |= PSW_ADDR_AMODE;
uaddr &= PAGE_MASK;
access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access, uaddr | 2);
if (unlikely(fault)) {
if (fault & VM_FAULT_OOM)
return -EFAULT;
else if (fault & VM_FAULT_SIGBUS)
do_sigbus(&regs, pgm_int_code, uaddr);
}
return fault ? -EFAULT : 0;
}
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page fault routines.
*/
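/*
 * Background (added commentary): under z/VM, DIAG 0x258 registers a
 * pseudo-page-fault token (reffcode 0 in pfault_init) or cancels it
 * (reffcode 1 in pfault_fini). Instead of stopping the whole virtual
 * CPU while a guest page is brought in, VM then raises paired 0x2603
 * external interrupts; pfault_interrupt() matches the initial and
 * completion halves via thread.pfault_wait.
 */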
static int pfault_disable;
static int __init nopfault(char *str)
{
pfault_disable = 1;
return 1;
}
__setup("nopfault", nopfault);
struct pfault_refbk {
u16 refdiagc;
u16 reffcode;
u16 refdwlen;
u16 refversn;
u64 refgaddr;
u64 refselmk;
u64 refcmpmk;
u64 reserved;
} __attribute__ ((packed, aligned(8)));
int pfault_init(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 0,
.refdwlen = 5,
.refversn = 2,
.refgaddr = __LC_CURRENT_PID,
.refselmk = 1ULL << 48,
.refcmpmk = 1ULL << 48,
.reserved = __PF_RES_FIELD };
int rc;
if (!MACHINE_IS_VM || pfault_disable)
return -1;
asm volatile(
" diag %1,%0,0x258\n"
"0: j 2f\n"
"1: la %0,8\n"
"2:\n"
EX_TABLE(0b,1b)
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
return rc;
}
void pfault_fini(void)
{
struct pfault_refbk refbk = {
.refdiagc = 0x258,
.reffcode = 1,
.refdwlen = 5,
.refversn = 2,
};
if (!MACHINE_IS_VM || pfault_disable)
return;
asm volatile(
" diag %0,0,0x258\n"
"0:\n"
EX_TABLE(0b,0b)
: : "a" (&refbk), "m" (refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);
static void pfault_interrupt(unsigned int ext_int_code,
unsigned int param32, unsigned long param64)
{
struct task_struct *tsk;
__u16 subcode;
pid_t pid;
/*
* Get the external interruption subcode & pfault
* initial/completion signal bit. VM stores this
* in the 'cpu address' field associated with the
* external interrupt.
*/
subcode = ext_int_code >> 16;
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
if (subcode & 0x0080) {
/* Get the token (= pid of the affected task). */
pid = sizeof(void *) == 4 ? param32 : param64;
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
if (!tsk)
return;
} else {
tsk = current;
}
spin_lock(&pfault_lock);
if (subcode & 0x0080) {
/* signal bit is set -> a page has been swapped in by VM */
if (tsk->thread.pfault_wait == 1) {
/* Initial interrupt was faster than the completion
* interrupt. pfault_wait is valid. Set pfault_wait
* back to zero and wake up the process. This can
* safely be done because the task is still sleeping
* and can't produce new pfaults. */
tsk->thread.pfault_wait = 0;
list_del(&tsk->thread.list);
wake_up_process(tsk);
} else {
/* Completion interrupt was faster than initial
* interrupt. Set pfault_wait to -1 so the initial
* interrupt doesn't put the task to sleep. */
tsk->thread.pfault_wait = -1;
}
put_task_struct(tsk);
} else {
/* signal bit not set -> a real page is missing. */
if (tsk->thread.pfault_wait == -1) {
/* Completion interrupt was faster than the initial
* interrupt (pfault_wait == -1). Set pfault_wait
* back to zero and exit. */
tsk->thread.pfault_wait = 0;
} else {
/* Initial interrupt arrived before completion
* interrupt. Let the task sleep. */
tsk->thread.pfault_wait = 1;
list_add(&tsk->thread.list, &pfault_list);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
set_tsk_need_resched(tsk);
}
}
spin_unlock(&pfault_lock);
}
static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
switch (action) {
case CPU_DEAD:
case CPU_DEAD_FROZEN:
spin_lock_irq(&pfault_lock);
list_for_each_entry_safe(thread, next, &pfault_list, list) {
thread->pfault_wait = 0;
list_del(&thread->list);
tsk = container_of(thread, struct task_struct, thread);
wake_up_process(tsk);
}
spin_unlock_irq(&pfault_lock);
break;
default:
break;
}
return NOTIFY_OK;
}
static int __init pfault_irq_init(void)
{
int rc;
if (!MACHINE_IS_VM)
return 0;
rc = register_external_interrupt(0x2603, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
if (rc)
goto out_pfault;
service_subclass_irq_register();
hotcpu_notifier(pfault_cpu_notify, 0);
return 0;
out_pfault:
unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
|
277,811,928,275,237 | /*
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
/*
* This routine will put a word on the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
void ptrace_triggered(struct perf_event *bp, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct thread_struct *thread = &tsk->thread;
struct perf_event *bp;
struct perf_event_attr attr;
bp = thread->ptrace_bps[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->ptrace_bps[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
}
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (ptrace_get_breakpoints(child) < 0)
return;
set_single_step(child, pc);
ptrace_put_breakpoints(child);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret)
/* PC, PR, SR, GBR, MACH, MACL, TRA */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_active(struct task_struct *target,
const struct user_regset *regset)
{
struct pt_regs *regs = task_pt_regs(target);
return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(pr),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(gbr),
REG_OFFSET_NAME(mach),
REG_OFFSET_NAME(macl),
REG_OFFSET_NAME(tra),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* R0 --> R15
* PC, PR, SR, GBR, MACH, MACL, TRA
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
#ifdef CONFIG_SH_DSP
[REGSET_DSP] = {
.n = sizeof(struct pt_dspregs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = dspregs_get,
.set = dspregs_set,
.active = dspregs_active,
},
#endif
};
static const struct user_regset_view user_sh_native_view = {
.name = "sh",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
} else {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
}
} else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
else if (addr == PT_DATA_ADDR)
tmp = child->mm->start_data;
else if (addr == PT_TEXT_END_ADDR)
tmp = child->mm->end_code;
else if (addr == PT_TEXT_LEN)
tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
[index >> 2] = data;
ret = 0;
} else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
static inline int audit_arch(void)
{
int arch = EM_SH;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->regs[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return ret ?: regs->regs[0];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
| /*
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
/*
* This routine will put a word on the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
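/*
 * The "offset" in the two helpers above is a byte offset into the
 * saved struct pt_regs; PTRACE_PEEKUSR/POKEUSR in arch_ptrace() pass
 * the user-supplied offset through after bounds-checking it.
 */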
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
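/*
 * Added commentary: SH has no single-step bit in SR, so
 * PTRACE_SINGLESTEP is emulated by planting a one-shot hardware
 * breakpoint (slot 0 of thread->ptrace_bps, via the perf
 * hw_breakpoint layer); ptrace_triggered() above disarms it after
 * the first hit.
 */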
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct thread_struct *thread = &tsk->thread;
struct perf_event *bp;
struct perf_event_attr attr;
bp = thread->ptrace_bps[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->ptrace_bps[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
}
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (ptrace_get_breakpoints(child) < 0)
return;
set_single_step(child, pc);
ptrace_put_breakpoints(child);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret)
/* PC, PR, SR, GBR, MACH, MACL, TRA */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_active(struct task_struct *target,
const struct user_regset *regset)
{
struct pt_regs *regs = task_pt_regs(target);
return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(pr),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(gbr),
REG_OFFSET_NAME(mach),
REG_OFFSET_NAME(macl),
REG_OFFSET_NAME(tra),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* R0 --> R15
* PC, PR, SR, GBR, MACH, MACL, TRA
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
#ifdef CONFIG_SH_DSP
[REGSET_DSP] = {
.n = sizeof(struct pt_dspregs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = dspregs_get,
.set = dspregs_set,
.active = dspregs_active,
},
#endif
};
static const struct user_regset_view user_sh_native_view = {
.name = "sh",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
} else {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
}
} else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
else if (addr == PT_DATA_ADDR)
tmp = child->mm->start_data;
else if (addr == PT_TEXT_END_ADDR)
tmp = child->mm->end_code;
else if (addr == PT_TEXT_LEN)
tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
[index >> 2] = data;
ret = 0;
} else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
static inline int audit_arch(void)
{
int arch = EM_SH;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->regs[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return ret ?: regs->regs[0];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
|
36,912,320,636,613 | /*
* 'traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
* Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC 12
# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
return;
}
printk("%08x ", val);
}
}
printk("\n");
}
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
print_modules();
show_regs(regs);
printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
(unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
* - kernel/userspace interfaces cause a jump to an appropriate handler
* - other kernel errors are bad
*/
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
if ((count == 1) && dst[0] & 0x80) {
dst[1] = 0xff;
dst[2] = 0xff;
dst[3] = 0xff;
}
if ((count == 2) && dst[1] & 0x80) {
dst[2] = 0xff;
dst[3] = 0xff;
}
#else
if ((count == 1) && dst[3] & 0x80) {
dst[2] = 0xff;
dst[1] = 0xff;
dst[0] = 0xff;
}
if ((count == 2) && dst[2] & 0x80) {
dst[1] = 0xff;
dst[0] = 0xff;
}
#endif
}
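/*
 * Illustrative sketch (assumes little-endian, compiled out): how
 * sign_extend() widens a 1-byte value that handle_unaligned_ins() below
 * has copied into the low byte of a zeroed register slot.
 */
#if 0
static void sign_extend_example(void)
{
	unsigned long reg = 0;
	unsigned char *dst = (unsigned char *)&reg;

	dst[0] = 0x9c;		/* byte fetched from memory, MSB set */
	sign_extend(1, dst);	/* fills bytes 1..3 with 0xff */
	/* reg now holds 0xffffff9c, i.e. (long)-100 */
}
#endif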
static struct mem_access user_mem_access = {
copy_from_user,
copy_to_user,
};
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
* - note that PC _may not_ point to the faulting instruction
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
index = (instruction>>4)&15; /* 0x00F0 */
rm = &regs->regs[index];
count = 1<<(instruction&3);
switch (count) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
break;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
break;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
}
return ret;
fetch_fault:
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
die_if_no_fixup("Fault in unaligned fixup", regs, 0);
return -EFAULT;
}
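/*
 * Worked example (illustrative): 0x2122 is "mov.l r2,@r1". The decoder
 * above extracts rn = &regs->regs[1] ((insn >> 8) & 15), rm =
 * &regs->regs[2] ((insn >> 4) & 15) and count = 1 << (insn & 3) = 4
 * bytes, then takes the "case 2" store path without pre-decrement,
 * since bit 2 of the instruction is clear.
 */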
/*
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
insn_size_t old_instruction,
struct mem_access *ma)
{
insn_size_t instruction;
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
/* kernel */
die("delay-slot-insn faulting in handle_unaligned_delayslot",
regs, 0);
}
return handle_unaligned_ins(instruction, regs, ma);
}
/*
* handle an instruction that does an unaligned memory access
* - have to be careful of branch delay-slot instructions that fault
* SH3:
* - if the branch would be taken PC points to the branch
* - if the branch would not be taken, PC points to delay-slot
* SH4:
* - PC always points to delayed branch
* - return 0 if handled, -EFAULT if failed (may not return if in kernel)
*/
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
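/*
 * Worked examples (illustrative, compiled out): displacements are in
 * instruction words, and the hardware adds 4 for the delay slot.
 */
#if 0
static inline void branch_offset_selftest(void)
{
	BUILD_BUG_ON(SH_PC_12BIT_OFFSET(0xa002) != 8);	/* bra, d = +2: PC+2*2+4 */
	BUILD_BUG_ON(SH_PC_8BIT_OFFSET(0x8bfe) != 0);	/* bf, d = -2: PC+(-2)*2+4 */
}
#endif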
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int expected,
unsigned long address)
{
u_int rm;
int ret, index;
/*
* XXX: We can't handle mixed 16/32-bit instructions yet
*/
if (instruction_size(instruction) != 2)
return -EINVAL;
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
/*
* Log the unexpected fixups, and then pass them on to perf.
*
* We intentionally don't report the expected cases to perf as
* otherwise the trapped I/O case will skew the results too much
* to be useful.
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
regs, address);
}
ret = -EFAULT;
switch (instruction&0xF000) {
case 0x0000:
if (instruction==0x000B) {
/* rts */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x1000: /* mov.l Rm,@(disp,Rn) */
goto simple;
case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
goto simple;
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x5000: /* mov.l @(disp,Rm),Rn */
goto simple;
case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
goto simple;
case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
switch (instruction&0x0F00) {
case 0x0100: /* mov.w R0,@(disp,Rm) */
goto simple;
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
case 0x0900: /* bt lab - no delayslot */
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
}
break;
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
}
return ret;
/* handle non-delay-slot instruction */
simple:
ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
/*
* Handle various address error exceptions:
* - instruction address error:
* misaligned PC
* PC >= 0x80000000 in user mode
* - data address error (read and write)
* misaligned data access
* access to >= 0x80000000 in user mode
* Unfortunately we can't distinguish between instruction address errors
* and data address errors caused by read accesses.
*/
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
error_code = lookup_exception_vector();
#endif
oldfs = get_fs();
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
unsigned int user_action;
local_irq_enable();
inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
sizeof(instruction))) {
set_fs(oldfs);
goto uspace_segv;
}
set_fs(oldfs);
/* shout about userspace fixups */
unaligned_fixups_notify(current, instruction, regs);
user_action = unaligned_user_action();
if (user_action & UM_FIXUP)
goto fixup;
if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
regs->pc += instruction_size(instruction);
return;
}
fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
goto uspace_segv;
}
set_fs(USER_DS);
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access, 0,
address);
set_fs(oldfs);
if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
regs->pr);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen on non-SMP.
*/
set_fs(oldfs);
die("insn faulting in do_address_error", regs, 0);
}
unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs, &user_mem_access,
0, address);
set_fs(oldfs);
}
}
#ifdef CONFIG_SH_DSP
/*
* SH-DSP support gerg@snapgear.com.
*/
int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
/*
* Safeguard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
inst &= 0xf000;
/* Check for any type of DSP or support instruction */
if ((inst == 0xf000) || (inst == 0x4000))
return 1;
return 0;
}
#else
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
siginfo_t info;
switch (r4) {
case TRAP_DIVZERO_ERROR:
info.si_code = FPE_INTDIV;
break;
case TRAP_DIVOVF_ERROR:
info.si_code = FPE_INTOVF;
break;
}
force_sig_info(SIGFPE, &info, current);
}
#endif
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;
int err;
get_user(inst, (unsigned short*)regs->pc);
err = do_fpu_inst(inst, regs);
if (!err) {
regs->pc += instruction_size(inst);
return;
}
/* not a FPU inst. */
#endif
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
/* Save DSP mode */
tsk->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
error_code = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
* bts: 8dxx: PC+=d*2+4;
* bra: axxx: PC+=D*2+4;
* bsr: bxxx: PC+=D*2+4 after PR=PC+4;
* braf:0x23: PC+=Rn*2+4;
* bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
* jmp: 4x2b: PC=Rn;
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
if (((inst & 0xf000) == 0xb000) || /* bsr */
((inst & 0xf0ff) == 0x0003) || /* bsrf */
((inst & 0xf0ff) == 0x400b)) /* jsr */
regs->pr = regs->pc + 4;
if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long inst;
struct task_struct *tsk = current;
if (kprobe_handle_illslot(regs->pc) == 0)
return;
#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
inst = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("illegal slot instruction", regs, inst);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
ex = lookup_exception_vector();
die_if_kernel("exception", regs, ex);
}
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
/* NOTE: The VBR value should be at P1
(or P2, virtual "fixed" address space).
It should definitely not be a physical address. */
asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
/* disable exception blocking now that the vbr has been set up */
clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
/*
* For SH-4 lacking an FPU, treat floating point instructions as
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (tsk == current)
sp = (unsigned long *)current_stack_pointer;
else
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
dump_mem("Stack: ", stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
| /*
* 'traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
* Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC 12
# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
return;
}
printk("%08x ", val);
}
}
printk("\n");
}
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
print_modules();
show_regs(regs);
printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
(unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
* - kernel/userspace interfaces cause a jump to an appropriate handler
* - other kernel errors are bad
*/
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
if ((count == 1) && dst[0] & 0x80) {
dst[1] = 0xff;
dst[2] = 0xff;
dst[3] = 0xff;
}
if ((count == 2) && dst[1] & 0x80) {
dst[2] = 0xff;
dst[3] = 0xff;
}
#else
if ((count == 1) && dst[3] & 0x80) {
dst[2] = 0xff;
dst[1] = 0xff;
dst[0] = 0xff;
}
if ((count == 2) && dst[2] & 0x80) {
dst[1] = 0xff;
dst[0] = 0xff;
}
#endif
}
static struct mem_access user_mem_access = {
copy_from_user,
copy_to_user,
};
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
* - note that PC _may not_ point to the faulting instruction
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
index = (instruction>>4)&15; /* 0x00F0 */
rm = &regs->regs[index];
count = 1<<(instruction&3);
switch (count) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
break;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
break;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
}
return ret;
fetch_fault:
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
die_if_no_fixup("Fault in unaligned fixup", regs, 0);
return -EFAULT;
}
/*
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
insn_size_t old_instruction,
struct mem_access *ma)
{
insn_size_t instruction;
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
/* kernel */
die("delay-slot-insn faulting in handle_unaligned_delayslot",
regs, 0);
}
return handle_unaligned_ins(instruction, regs, ma);
}
/*
* handle an instruction that does an unaligned memory access
* - have to be careful of branch delay-slot instructions that fault
* SH3:
* - if the branch would be taken PC points to the branch
* - if the branch would not be taken, PC points to delay-slot
* SH4:
* - PC always points to delayed branch
* - return 0 if handled, -EFAULT if failed (may not return if in kernel)
*/
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int expected,
unsigned long address)
{
u_int rm;
int ret, index;
/*
* XXX: We can't handle mixed 16/32-bit instructions yet
*/
if (instruction_size(instruction) != 2)
return -EINVAL;
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
/*
* Log the unexpected fixups, and then pass them on to perf.
*
* We intentionally don't report the expected cases to perf as
* otherwise the trapped I/O case will skew the results too much
* to be useful.
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
regs, address);
}
ret = -EFAULT;
switch (instruction&0xF000) {
case 0x0000:
if (instruction==0x000B) {
/* rts */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x1000: /* mov.l Rm,@(disp,Rn) */
goto simple;
case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
goto simple;
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x5000: /* mov.l @(disp,Rm),Rn */
goto simple;
case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
goto simple;
case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
switch (instruction&0x0F00) {
case 0x0100: /* mov.w R0,@(disp,Rm) */
goto simple;
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
case 0x0900: /* bt lab - no delayslot */
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
}
break;
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
}
return ret;
/* handle non-delay-slot instruction */
simple:
ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
/*
* Handle various address error exceptions:
* - instruction address error:
* misaligned PC
* PC >= 0x80000000 in user mode
* - data address error (read and write)
* misaligned data access
* access to >= 0x80000000 in user mode
* Unfortunately we can't distinguish between instruction address errors
* and data address errors caused by read accesses.
*/
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
error_code = lookup_exception_vector();
#endif
oldfs = get_fs();
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
unsigned int user_action;
local_irq_enable();
inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
sizeof(instruction))) {
set_fs(oldfs);
goto uspace_segv;
}
set_fs(oldfs);
/* shout about userspace fixups */
unaligned_fixups_notify(current, instruction, regs);
user_action = unaligned_user_action();
if (user_action & UM_FIXUP)
goto fixup;
if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
regs->pc += instruction_size(instruction);
return;
}
fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
goto uspace_segv;
}
set_fs(USER_DS);
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access, 0,
address);
set_fs(oldfs);
if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
regs->pr);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen on non-SMP.
*/
set_fs(oldfs);
die("insn faulting in do_address_error", regs, 0);
}
unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs, &user_mem_access,
0, address);
set_fs(oldfs);
}
}
#ifdef CONFIG_SH_DSP
/*
* SH-DSP support gerg@snapgear.com.
*/
int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
/*
* Safeguard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
inst &= 0xf000;
/* Check for any type of DSP or support instruction */
if ((inst == 0xf000) || (inst == 0x4000))
return 1;
return 0;
}
#else
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
siginfo_t info;
switch (r4) {
case TRAP_DIVZERO_ERROR:
info.si_code = FPE_INTDIV;
break;
case TRAP_DIVOVF_ERROR:
info.si_code = FPE_INTOVF;
break;
}
force_sig_info(SIGFPE, &info, current);
}
#endif
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;
int err;
get_user(inst, (unsigned short*)regs->pc);
err = do_fpu_inst(inst, regs);
if (!err) {
regs->pc += instruction_size(inst);
return;
}
/* not a FPU inst. */
#endif
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
/* Save DSP mode */
tsk->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
error_code = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
* bts: 8dxx: PC+=d*2+4;
* bra: axxx: PC+=D*2+4;
* bsr: bxxx: PC+=D*2+4 after PR=PC+4;
* braf:0x23: PC+=Rn*2+4;
* bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
* jmp: 4x2b: PC=Rn;
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
if (((inst & 0xf000) == 0xb000) || /* bsr */
((inst & 0xf0ff) == 0x0003) || /* bsrf */
((inst & 0xf0ff) == 0x400b)) /* jsr */
regs->pr = regs->pc + 4;
if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long inst;
struct task_struct *tsk = current;
if (kprobe_handle_illslot(regs->pc) == 0)
return;
#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
inst = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("illegal slot instruction", regs, inst);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
ex = lookup_exception_vector();
die_if_kernel("exception", regs, ex);
}
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
/* NOTE: The VBR value should be at P1
(or P2, virtual "fixed" address space).
It should definitely not be a physical address. */
asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
/* disable exception blocking now that the vbr has been set up */
clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
/*
* For SH-4 lacking an FPU, treat floating point instructions as
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (tsk == current)
sp = (unsigned long *)current_stack_pointer;
else
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
dump_mem("Stack: ", stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
|
53,835,392,916,343 | /*
* arch/sh/kernel/traps_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
printk("%s: %lx\n", str, (err & 0xffffff));
show_regs(regs);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
/* Implement misaligned load/store handling for kernel (and optionally for user
mode too). Limitation : only SHmedia mode code is handled - there is no
handling at all for misaligned accesses occurring in SHcompact code yet. */
static int misaligned_fixup(struct pt_regs *regs);
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(7, SIGSEGV, "address error(load)",
"do_address_error_load",
error_code, regs, current);
}
return;
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(8, SIGSEGV, "address error(store)",
"do_address_error_store",
error_code, regs, current);
}
return;
}
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,20:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
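/*
 * Illustrative sketch (compiled out): classifying an opcode with the
 * table above, exactly as do_reserved_inst() does below -- the major
 * opcode indexes the array and the minor opcode selects a bit pair.
 */
#if 0
static int shmedia_opcode_state(unsigned long opcode)
{
	unsigned long major = (opcode >> 26) & 0x3f;	/* table index */
	unsigned long minor = (opcode >> 16) & 0xf;	/* bit-pair select */

	return (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
}
#endif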
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
int get_user_error;
int trapnr = 12;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
if ((pc & 3) == 1) {
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
}
if (get_user_error >= 0) {
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
if (reserved_field == 0) {
int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/* Restart the instruction : the branch to the instruction will now be from an RTE
not from SHcompact so the silicon defect won't be triggered. */
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/* Should only ever get here if a module has
SHcompact code inside it. If so, the same fix up is needed. */
return; /* same reason */
}
/* Otherwise, user mode trying to execute a privileged instruction -
fall through to trap. */
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs)) return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else {
/* Trap */
}
break;
default:
/* Fall through to trap. */
break;
}
}
/* fall through to normal resinst processing */
} else {
/* Error trying to read opcode. This typically means a
real fault, not a RESINST any more. So change the
codes. */
trapnr = 87;
exception_name = "address error (exec)";
signr = SIGSEGV;
}
}
do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
show_excp_regs(__func__, -1, -1, regs);
die_if_kernel("exception", regs, ex);
}
int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
extern void sh64_unwind(struct pt_regs *regs);
struct pt_regs *regs;
regs = tsk ? tsk->thread.kregs : NULL;
sh64_unwind(regs);
#else
printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}
void show_task(unsigned long *sp)
{
show_stack(NULL, sp);
}
void dump_stack(void)
{
show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
show_excp_regs(fn_name, trapnr, signr, regs);
tsk->thread.error_code = error_code;
tsk->thread.trap_no = trapnr;
if (user_mode(regs))
force_sig(signr, tsk);
die_if_no_fixup(str, regs, error_code);
}
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
unsigned long opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
*result_opcode = *(unsigned long *) aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
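/*
 * Worked example (illustrative, NEFF == 32, compiled out):
 * 0xffffffff80000000 is a correctly sign-extended 32-bit address and
 * passes the check above; 0x0000000080000000 is not and is rejected.
 */
#if 0
static void sign_extension_check_example(void)
{
	BUG_ON(address_is_sign_extended(0xffffffff80000000ULL) != 1);
	BUG_ON(address_is_sign_extended(0x0000000080000000ULL) != 0);
}
#endif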
static int generate_and_check_address(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
/* return -1 for fault, 0 for OK */
__u64 base_address, addr;
int basereg;
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = ((displacement << 54) >> 54); /* sign extend */
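/* Illustrative note: the displacement is a 10-bit signed field, so the
   shift-left-54 / arithmetic-shift-right-54 sequence sign-extends it within
   a 64-bit value; e.g. a raw field of 0x3ff becomes -1, giving an effective
   range of -512..+511 units of (1 << width_shift) bytes. */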
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
/* Check sign extended */
if (!address_is_sign_extended(addr)) {
return -1;
}
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
if (addr >= TASK_SIZE) {
return -1;
}
/* Do access_ok check later - it depends on whether it's a load or a store. */
}
*address = addr;
return 0;
}
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
static int kernel_mode_unaligned_fixup_count = 32;
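/* The two helpers below transfer a 16-bit datum one byte at a time; byte
   accesses are always naturally aligned, so no further alignment fault can
   be raised while the original one is being fixed up. */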
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
static int misaligned_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
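/* ldlo/ldhi each pick up the bytes of the misaligned datum that fall within
   one of the two adjacent aligned containers; OR-ing the two partial results
   reassembles the full value (the usual SHmedia unaligned-access idiom). */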
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
static int misaligned_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fpu_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fixup(struct pt_regs *regs)
{
unsigned long opcode;
int error;
int major, minor;
if (!user_mode_unaligned_fixup_enable)
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
--user_mode_unaligned_fixup_count;
/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
} else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
--kernel_mode_unaligned_fixup_count;
if (in_interrupt()) {
printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
(__u32)regs->pc, opcode);
} else {
printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
}
}
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
static ctl_table unaligned_table[] = {
{
.procname = "kernel_reports",
.data = &kernel_mode_unaligned_fixup_count,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "user_reports",
.data = &user_mode_unaligned_fixup_count,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "user_enable",
.data = &user_mode_unaligned_fixup_enable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{}
};
static ctl_table unaligned_root[] = {
{
.procname = "unaligned_fixup",
.mode = 0555,
.child = unaligned_table
},
{}
};
static ctl_table sh64_root[] = {
{
.procname = "sh64",
.mode = 0555,
.child = unaligned_root
},
{}
};
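/* The nested tables above surface as
   /proc/sys/sh64/unaligned_fixup/{kernel_reports,user_reports,user_enable},
   so the report budgets and the user-mode fixup switch can be tuned at
   run time. */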
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
sysctl_header = register_sysctl_table(sh64_root);
return 0;
}
__initcall(init_sysctl);
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4) {
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
}
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
void __cpuinit per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}
| /*
* arch/sh/kernel/traps_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
console_verbose();
spin_lock_irq(&die_lock);
printk("%s: %lx\n", str, (err & 0xffffff));
show_regs(regs);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
/* Implement misaligned load/store handling for kernel (and optionally for user
mode too). Limitation : only SHmedia mode code is handled - there is no
handling at all for misaligned accesses occurring in SHcompact code yet. */
static int misaligned_fixup(struct pt_regs *regs);
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(7, SIGSEGV, "address error(load)",
"do_address_error_load",
error_code, regs, current);
}
return;
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0) {
do_unhandled_exception(8, SIGSEGV, "address error(store)",
"do_address_error_store",
error_code, regs, current);
}
return;
}
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,19:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
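/* Worked example (illustrative): for an opcode with major = 0x09 and
   minor = 0x5, the 10-bit value is (0x09 << 4) | 0x5 = 0x095, so entry
   shmedia_opcode_table[0x09] is consulted and (entry >> (0x5 << 1)) & 0x3
   yields the OPCODE_* state for that instruction. */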
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
int get_user_error;
int trapnr = 12;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
if ((pc & 3) == 1) {
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
}
if (get_user_error >= 0) {
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
if (reserved_field == 0) {
int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/* Restart the instruction : the branch to the instruction will now be from an RTE
not from SHcompact so the silicon defect won't be triggered. */
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/* Should only ever get here if a module has
SHcompact code inside it. If so, the same fix up is needed. */
return; /* same reason */
}
/* Otherwise, user mode trying to execute a privileged instruction -
fall through to trap. */
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs)) return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62) {
return;
}
/* Otherwise, reserved or privileged control register, => trap */
} else {
/* Trap */
}
break;
default:
/* Fall through to trap. */
break;
}
}
/* fall through to normal resinst processing */
} else {
/* Error trying to read opcode. This typically means a
real fault, not a RESINST any more. So change the
codes. */
trapnr = 87;
exception_name = "address error (exec)";
signr = SIGSEGV;
}
}
do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
show_excp_regs(__func__, -1, -1, regs);
die_if_kernel("exception", regs, ex);
}
int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
extern void sh64_unwind(struct pt_regs *regs);
struct pt_regs *regs;
regs = tsk ? tsk->thread.kregs : NULL;
sh64_unwind(regs);
#else
printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}
void show_task(unsigned long *sp)
{
show_stack(NULL, sp);
}
void dump_stack(void)
{
show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
show_excp_regs(fn_name, trapnr, signr, regs);
tsk->thread.error_code = error_code;
tsk->thread.trap_no = trapnr;
if (user_mode(regs))
force_sig(signr, tsk);
die_if_no_fixup(str, regs, error_code);
}
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
unsigned long opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
*result_opcode = *(unsigned long *) aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
static int generate_and_check_address(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
/* return -1 for fault, 0 for OK */
__u64 base_address, addr;
int basereg;
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = ((displacement << 54) >> 54); /* sign extend */
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
/* Check sign extended */
if (!address_is_sign_extended(addr)) {
return -1;
}
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
if (addr >= TASK_SIZE) {
return -1;
}
/* Do access_ok check later - it depends on whether it's a load or a store. */
}
*address = addr;
return 0;
}
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
static int kernel_mode_unaligned_fixup_count = 32;
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
static int misaligned_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
static int misaligned_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fpu_store(struct pt_regs *regs,
__u32 opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0) {
return error;
}
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fixup(struct pt_regs *regs)
{
unsigned long opcode;
int error;
int major, minor;
if (!user_mode_unaligned_fixup_enable)
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
--user_mode_unaligned_fixup_count;
/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
} else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
--kernel_mode_unaligned_fixup_count;
if (in_interrupt()) {
printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
(__u32)regs->pc, opcode);
} else {
printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
}
}
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
static ctl_table unaligned_table[] = {
{
.procname = "kernel_reports",
.data = &kernel_mode_unaligned_fixup_count,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "user_reports",
.data = &user_mode_unaligned_fixup_count,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
.procname = "user_enable",
.data = &user_mode_unaligned_fixup_enable,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{}
};
static ctl_table unaligned_root[] = {
{
.procname = "unaligned_fixup",
.mode = 0555,
.child = unaligned_table
},
{}
};
static ctl_table sh64_root[] = {
{
.procname = "sh64",
.mode = 0555,
.child = unaligned_root
},
{}
};
static struct ctl_table_header *sysctl_header;
static int __init init_sysctl(void)
{
sysctl_header = register_sysctl_table(sh64_root);
return 0;
}
__initcall(init_sysctl);
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4) {
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
}
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
void __cpuinit per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}
|
56,656,639,446,871 | /*
* arch/sh/math-emu/math.c
*
* Copyright (C) 2006 Takashi YOSHII <takasi-y@ops.dti.ne.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include "sfp-util.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#define FPUL (fregs->fpul)
#define FPSCR (fregs->fpscr)
#define FPSCR_RM (FPSCR&3)
#define FPSCR_DN ((FPSCR>>18)&1)
#define FPSCR_PR ((FPSCR>>19)&1)
#define FPSCR_SZ ((FPSCR>>20)&1)
#define FPSCR_FR ((FPSCR>>21)&1)
#define FPSCR_MASK 0x003fffffUL
#define BANK(n) (n^(FPSCR_FR?16:0))
#define FR ((unsigned long*)(fregs->fp_regs))
#define FR0 (FR[BANK(0)])
#define FRn (FR[BANK(n)])
#define FRm (FR[BANK(m)])
#define DR ((unsigned long long*)(fregs->fp_regs))
#define DRn (DR[BANK(n)/2])
#define DRm (DR[BANK(m)/2])
#define XREG(n) (n^16)
#define XFn (FR[BANK(XREG(n))])
#define XFm (FR[BANK(XREG(m))])
#define XDn (DR[BANK(XREG(n))/2])
#define XDm (DR[BANK(XREG(m))/2])
#define R0 (regs->regs[0])
#define Rn (regs->regs[n])
#define Rm (regs->regs[m])
#define WRITE(d,a) ({if(put_user(d, (typeof (d)*)a)) return -EFAULT;})
#define READ(d,a) ({if(get_user(d, (typeof (d)*)a)) return -EFAULT;})
#define PACK_S(r,f) FP_PACK_SP(&r,f)
#define UNPACK_S(f,r) FP_UNPACK_SP(f,&r)
#define PACK_D(r,f) \
{u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];}
#define UNPACK_D(f,r) \
{u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);}
// 2 args instructions.
#define BOTH_PRmn(op,x) \
FP_DECL_EX; if(FPSCR_PR) op(D,x,DRm,DRn); else op(S,x,FRm,FRn);
#define CMP_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_##SZ(R, Fn, Fm, 2); }while(0)
#define EQ_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_EQ_##SZ(R, Fn, Fm); }while(0)
#define CMP(OP) ({ int r; BOTH_PRmn(OP##_X,r); r; })
static int
fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP) > 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
static int
fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP /*EQ*/) == 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
#define ARITH_X(SZ,OP,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); FP_DECL_##SZ(Fr); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_##OP##_##SZ(Fr, Fn, Fm); \
PACK_##SZ(N, Fr); }while(0)
static int
fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, ADD);
return 0;
}
static int
fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, SUB);
return 0;
}
static int
fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, MUL);
return 0;
}
static int
fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, DIV);
return 0;
}
static int
fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
FP_DECL_EX;
FP_DECL_S(Fr);
FP_DECL_S(Ft);
FP_DECL_S(F0);
FP_DECL_S(Fm);
FP_DECL_S(Fn);
UNPACK_S(F0, FR0);
UNPACK_S(Fm, FRm);
UNPACK_S(Fn, FRn);
FP_MUL_S(Ft, Fm, F0);
FP_ADD_S(Fr, Fn, Ft);
PACK_S(FRn, Fr);
return 0;
}
// to process fmov's extension (odd n for DR access XD).
#define FMOV_EXT(x) if(x&1) x+=16-1
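/* Illustrative example: with FPSCR.SZ set, an odd register number selects
   the extended bank, e.g. FMOV_EXT(3) turns n == 3 into 18, so the
   subsequent FRn/DRn accesses hit XD2 (FR bank entries 18/19). */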
static int
fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + R0 + 4);
n++;
READ(FRn, Rm + R0);
} else {
READ(FRn, Rm + R0);
}
return 0;
}
static int
fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + 4);
n++;
READ(FRn, Rm);
} else {
READ(FRn, Rm);
}
return 0;
}
static int
fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + 4);
n++;
READ(FRn, Rm);
Rm += 8;
} else {
READ(FRn, Rm);
Rm += 4;
}
return 0;
}
static int
fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
WRITE(FRm, Rn + R0 + 4);
m++;
WRITE(FRm, Rn + R0);
} else {
WRITE(FRm, Rn + R0);
}
return 0;
}
static int
fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
WRITE(FRm, Rn + 4);
m++;
WRITE(FRm, Rn);
} else {
WRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
Rn -= 8;
WRITE(FRm, Rn + 4);
m++;
WRITE(FRm, Rn);
} else {
Rn -= 4;
WRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
FMOV_EXT(n);
DRn = DRm;
} else {
FRn = FRm;
}
return 0;
}
static int
fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
return -EINVAL;
}
// 1 arg instructions.
#define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \
{ printk( #i " not yet done.\n"); return 0; }
NOTYETn(ftrv)
NOTYETn(fsqrt)
NOTYETn(fipr)
NOTYETn(fsca)
NOTYETn(fsrra)
#define EMU_FLOAT_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
FP_FROM_INT_##SZ(Fn, FPUL, 32, int); \
PACK_##SZ(N, Fn); }while(0)
static int ffloat(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FLOAT_X(D, DRn);
else
EMU_FLOAT_X(S, FRn);
return 0;
}
#define EMU_FTRC_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fn, N); \
FP_TO_INT_##SZ(FPUL, Fn, 32, 1); }while(0)
static int ftrc(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FTRC_X(D, DRn);
else
EMU_FTRC_X(S, FRn);
return 0;
}
static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_S(Fn);
FP_DECL_D(Fr);
UNPACK_S(Fn, FPUL);
FP_CONV(D, S, 2, 1, Fr, Fn);
PACK_D(DRn, Fr);
return 0;
}
static int fcnvds(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_D(Fn);
FP_DECL_S(Fr);
UNPACK_D(Fn, DRn);
FP_CONV(S, D, 1, 2, Fr, Fn);
PACK_S(FPUL, Fr);
return 0;
}
static int fxchg(struct sh_fpu_soft_struct *fregs, int flag)
{
FPSCR ^= flag;
return 0;
}
static int fsts(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = FPUL;
return 0;
}
static int flds(struct sh_fpu_soft_struct *fregs, int n)
{
FPUL = FRn;
return 0;
}
static int fneg(struct sh_fpu_soft_struct *fregs, int n)
{
FRn ^= (1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fabs(struct sh_fpu_soft_struct *fregs, int n)
{
FRn &= ~(1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fld0(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = 0;
return 0;
}
static int fld1(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = (_FP_EXPBIAS_S << (_FP_FRACBITS_S - 1));
return 0;
}
static int fnop_n(struct sh_fpu_soft_struct *fregs, int n)
{
return -EINVAL;
}
/// Instruction decoders.
static int id_fxfd(struct sh_fpu_soft_struct *, int);
static int id_fnxd(struct sh_fpu_soft_struct *, struct pt_regs *, int, int);
static int (*fnxd[])(struct sh_fpu_soft_struct *, int) = {
fsts, flds, ffloat, ftrc, fneg, fabs, fsqrt, fsrra,
fld0, fld1, fcnvsd, fcnvds, fnop_n, fnop_n, fipr, id_fxfd
};
static int (*fnmx[])(struct sh_fpu_soft_struct *, struct pt_regs *, int, int) = {
fadd, fsub, fmul, fdiv, fcmp_eq, fcmp_gt, fmov_idx_reg, fmov_reg_idx,
fmov_mem_reg, fmov_inc_reg, fmov_reg_mem, fmov_reg_dec,
fmov_reg_reg, id_fnxd, fmac, fnop_mn};
static int id_fxfd(struct sh_fpu_soft_struct *fregs, int x)
{
const int flag[] = { FPSCR_SZ, FPSCR_PR, FPSCR_FR, 0 };
switch (x & 3) {
case 3:
fxchg(fregs, flag[x >> 2]);
break;
case 1:
ftrv(fregs, x - 1);
break;
default:
fsca(fregs, x);
}
return 0;
}
static int
id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n)
{
return (fnxd[x])(fregs, n);
}
static int
id_fnmx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf;
return (fnmx[x])(fregs, regs, m, n);
}
static int
id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = ((code >> 8) & 0xf);
unsigned long *reg = (code & 0x0010) ? &FPUL : &FPSCR;
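/* Bit 4 of the opcode picks FPUL (set) or FPSCR (clear); the cases below
   are assumed to correspond to the STS/LDS register forms and their
   STS.L @-Rn / LDS.L @Rn+ memory forms of the SH opcode encoding. */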
switch (code & 0xf0ff) {
case 0x005a:
case 0x006a:
Rn = *reg;
break;
case 0x405a:
case 0x406a:
*reg = Rn;
break;
case 0x4052:
case 0x4062:
Rn -= 4;
WRITE(*reg, Rn);
break;
case 0x4056:
case 0x4066:
READ(*reg, Rn);
Rn += 4;
break;
default:
return -EINVAL;
}
return 0;
}
static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs)
{
if ((code & 0xf000) == 0xf000)
return id_fnmx(fregs, regs, code);
else
return id_sys(fregs, regs, code);
}
/**
* denormal_to_double - Convert a denormalized single-precision value (from
*                      fpul) and store it as a double-precision value
*
* @fpu: Pointer to sh_fpu_soft structure
* @n: Index to FP register
*/
static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
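/* A single-precision denormal has an effective (unbiased) exponent of -126;
   re-biasing for double precision starts at 1023 - 126 = 897, and each
   normalising shift of the fraction below decrements it further. */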
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n+1] = dl;
}
}
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int ieee_fpe_handler(struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
siginfo_t info;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf};
if (nib[0] == 0xb ||
(nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
regs->pr = regs->pc + 4;
if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
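/* The 12-bit branch displacement is sign-extended (shift left 4 into a
   16-bit value, then arithmetic shift right 3), which also scales it by 2
   to match the instruction-size granularity of SH branches. */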
nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (insn == 0x000b) { /* rts */
nextpc = regs->pr;
finsn = *(unsigned short *) (regs->pc + 2);
} else {
nextpc = regs->pc + 2;
finsn = insn;
}
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
/* FPU error */
denormal_to_double (&tsk->thread.xstate->softfpu,
(finsn >> 8) & 0xf);
tsk->thread.xstate->softfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
task_thread_info(tsk)->status |= TS_USEDFPU;
} else {
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = FPE_FLTINV;
info.si_addr = (void __user *)regs->pc;
force_sig_info(SIGFPE, &info, tsk);
}
regs->pc = nextpc;
return 1;
}
return 0;
}
asmlinkage void do_fpu_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
struct task_struct *tsk = current;
siginfo_t info;
if (ieee_fpe_handler(&regs))
return;
regs.pc += 2;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = FPE_FLTINV;
info.si_addr = (void __user *)regs.pc;
force_sig_info(SIGFPE, &info, tsk);
}
/**
* fpu_init - Initialize FPU registers
* @fpu: Pointer to software emulated FPU registers.
*/
static void fpu_init(struct sh_fpu_soft_struct *fpu)
{
int i;
fpu->fpscr = FPSCR_INIT;
fpu->fpul = 0;
for (i = 0; i < 16; i++) {
fpu->fp_regs[i] = 0;
fpu->xfp_regs[i]= 0;
}
}
/**
* do_fpu_inst - Handle reserved instructions for FPU emulation
* @inst: instruction code.
* @regs: registers on stack.
*/
int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
task_thread_info(tsk)->status |= TS_USEDFPU;
}
return fpu_emulate(inst, fpu, regs);
}
| /*
* arch/sh/math-emu/math.c
*
* Copyright (C) 2006 Takashi YOSHII <takasi-y@ops.dti.ne.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include "sfp-util.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#define FPUL (fregs->fpul)
#define FPSCR (fregs->fpscr)
#define FPSCR_RM (FPSCR&3)
#define FPSCR_DN ((FPSCR>>18)&1)
#define FPSCR_PR ((FPSCR>>19)&1)
#define FPSCR_SZ ((FPSCR>>20)&1)
#define FPSCR_FR ((FPSCR>>21)&1)
#define FPSCR_MASK 0x003fffffUL
#define BANK(n) (n^(FPSCR_FR?16:0))
#define FR ((unsigned long*)(fregs->fp_regs))
#define FR0 (FR[BANK(0)])
#define FRn (FR[BANK(n)])
#define FRm (FR[BANK(m)])
#define DR ((unsigned long long*)(fregs->fp_regs))
#define DRn (DR[BANK(n)/2])
#define DRm (DR[BANK(m)/2])
#define XREG(n) (n^16)
#define XFn (FR[BANK(XREG(n))])
#define XFm (FR[BANK(XREG(m))])
#define XDn (DR[BANK(XREG(n))/2])
#define XDm (DR[BANK(XREG(m))/2])
#define R0 (regs->regs[0])
#define Rn (regs->regs[n])
#define Rm (regs->regs[m])
#define WRITE(d,a) ({if(put_user(d, (typeof (d)*)a)) return -EFAULT;})
#define READ(d,a) ({if(get_user(d, (typeof (d)*)a)) return -EFAULT;})
#define PACK_S(r,f) FP_PACK_SP(&r,f)
#define UNPACK_S(f,r) FP_UNPACK_SP(f,&r)
#define PACK_D(r,f) \
{u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];}
#define UNPACK_D(f,r) \
{u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);}
// 2 args instructions.
#define BOTH_PRmn(op,x) \
FP_DECL_EX; if(FPSCR_PR) op(D,x,DRm,DRn); else op(S,x,FRm,FRn);
#define CMP_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_##SZ(R, Fn, Fm, 2); }while(0)
#define EQ_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_EQ_##SZ(R, Fn, Fm); }while(0)
#define CMP(OP) ({ int r; BOTH_PRmn(OP##_X,r); r; })
static int
fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP) > 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
static int
fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP /*EQ*/) == 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
#define ARITH_X(SZ,OP,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); FP_DECL_##SZ(Fr); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_##OP##_##SZ(Fr, Fn, Fm); \
PACK_##SZ(N, Fr); }while(0)
static int
fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, ADD);
return 0;
}
static int
fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, SUB);
return 0;
}
static int
fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, MUL);
return 0;
}
static int
fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, DIV);
return 0;
}
static int
fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
FP_DECL_EX;
FP_DECL_S(Fr);
FP_DECL_S(Ft);
FP_DECL_S(F0);
FP_DECL_S(Fm);
FP_DECL_S(Fn);
UNPACK_S(F0, FR0);
UNPACK_S(Fm, FRm);
UNPACK_S(Fn, FRn);
FP_MUL_S(Ft, Fm, F0);
FP_ADD_S(Fr, Fn, Ft);
PACK_S(FRn, Fr);
return 0;
}
// to process fmov's extension (odd n for DR access XD).
#define FMOV_EXT(x) if(x&1) x+=16-1
static int
fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + R0 + 4);
n++;
READ(FRn, Rm + R0);
} else {
READ(FRn, Rm + R0);
}
return 0;
}
static int
fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + 4);
n++;
READ(FRn, Rm);
} else {
READ(FRn, Rm);
}
return 0;
}
static int
fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
READ(FRn, Rm + 4);
n++;
READ(FRn, Rm);
Rm += 8;
} else {
READ(FRn, Rm);
Rm += 4;
}
return 0;
}
static int
fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
WRITE(FRm, Rn + R0 + 4);
m++;
WRITE(FRm, Rn + R0);
} else {
WRITE(FRm, Rn + R0);
}
return 0;
}
static int
fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
WRITE(FRm, Rn + 4);
m++;
WRITE(FRm, Rn);
} else {
WRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
Rn -= 8;
WRITE(FRm, Rn + 4);
m++;
WRITE(FRm, Rn);
} else {
Rn -= 4;
WRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
FMOV_EXT(n);
DRn = DRm;
} else {
FRn = FRm;
}
return 0;
}
static int
fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
return -EINVAL;
}
// 1 arg instructions.
#define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \
{ printk( #i " not yet done.\n"); return 0; }
NOTYETn(ftrv)
NOTYETn(fsqrt)
NOTYETn(fipr)
NOTYETn(fsca)
NOTYETn(fsrra)
#define EMU_FLOAT_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
FP_FROM_INT_##SZ(Fn, FPUL, 32, int); \
PACK_##SZ(N, Fn); }while(0)
static int ffloat(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FLOAT_X(D, DRn);
else
EMU_FLOAT_X(S, FRn);
return 0;
}
#define EMU_FTRC_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fn, N); \
FP_TO_INT_##SZ(FPUL, Fn, 32, 1); }while(0)
static int ftrc(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FTRC_X(D, DRn);
else
EMU_FTRC_X(S, FRn);
return 0;
}
static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_S(Fn);
FP_DECL_D(Fr);
UNPACK_S(Fn, FPUL);
FP_CONV(D, S, 2, 1, Fr, Fn);
PACK_D(DRn, Fr);
return 0;
}
static int fcnvds(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_D(Fn);
FP_DECL_S(Fr);
UNPACK_D(Fn, DRn);
FP_CONV(S, D, 1, 2, Fr, Fn);
PACK_S(FPUL, Fr);
return 0;
}
static int fxchg(struct sh_fpu_soft_struct *fregs, int flag)
{
FPSCR ^= flag;
return 0;
}
static int fsts(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = FPUL;
return 0;
}
static int flds(struct sh_fpu_soft_struct *fregs, int n)
{
FPUL = FRn;
return 0;
}
static int fneg(struct sh_fpu_soft_struct *fregs, int n)
{
FRn ^= (1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fabs(struct sh_fpu_soft_struct *fregs, int n)
{
FRn &= ~(1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fld0(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = 0;
return 0;
}
static int fld1(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = (_FP_EXPBIAS_S << (_FP_FRACBITS_S - 1));
return 0;
}
static int fnop_n(struct sh_fpu_soft_struct *fregs, int n)
{
return -EINVAL;
}
/// Instruction decoders.
static int id_fxfd(struct sh_fpu_soft_struct *, int);
static int id_fnxd(struct sh_fpu_soft_struct *, struct pt_regs *, int, int);
static int (*fnxd[])(struct sh_fpu_soft_struct *, int) = {
fsts, flds, ffloat, ftrc, fneg, fabs, fsqrt, fsrra,
fld0, fld1, fcnvsd, fcnvds, fnop_n, fnop_n, fipr, id_fxfd
};
static int (*fnmx[])(struct sh_fpu_soft_struct *, struct pt_regs *, int, int) = {
fadd, fsub, fmul, fdiv, fcmp_eq, fcmp_gt, fmov_idx_reg, fmov_reg_idx,
fmov_mem_reg, fmov_inc_reg, fmov_reg_mem, fmov_reg_dec,
fmov_reg_reg, id_fnxd, fmac, fnop_mn};
static int id_fxfd(struct sh_fpu_soft_struct *fregs, int x)
{
const int flag[] = { FPSCR_SZ, FPSCR_PR, FPSCR_FR, 0 };
switch (x & 3) {
case 3:
fxchg(fregs, flag[x >> 2]);
break;
case 1:
ftrv(fregs, x - 1);
break;
default:
fsca(fregs, x);
}
return 0;
}
static int
id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n)
{
return (fnxd[x])(fregs, n);
}
static int
id_fnmx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf;
return (fnmx[x])(fregs, regs, m, n);
}
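/*
 * Illustrative sketch (assumption, not part of the original source): how a
 * two-operand FPU opcode is split by id_fnmx() above.  All such opcodes have
 * the form 0xFnmx; e.g. code 0xf342 yields n = 3, m = 4, x = 2, so fnmx[2]
 * (fmul) is called and computes FR3 = FR3 * FR4.
 */
#if 0	/* example only, not compiled */
static void id_fnmx_decode_example(void)
{
	u16 code = 0xf342;
	int n = (code >> 8) & 0xf;	/* destination register index: 3 */
	int m = (code >> 4) & 0xf;	/* source register index: 4 */
	int x = code & 0xf;		/* dispatch index into fnmx[]: 2 (fmul) */
}
#endif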
static int
id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = ((code >> 8) & 0xf);
unsigned long *reg = (code & 0x0010) ? &FPUL : &FPSCR;
switch (code & 0xf0ff) {
case 0x005a:
case 0x006a:
Rn = *reg;
break;
case 0x405a:
case 0x406a:
*reg = Rn;
break;
case 0x4052:
case 0x4062:
Rn -= 4;
WRITE(*reg, Rn);
break;
case 0x4056:
case 0x4066:
READ(*reg, Rn);
Rn += 4;
break;
default:
return -EINVAL;
}
return 0;
}
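/*
 * Illustrative note (assumption, not part of the original source): the
 * system-register opcodes above pick FPUL or FPSCR from bit 4 of the low
 * byte.  E.g. code 0x015a masks to 0x005a with bit 4 set, so Rn = FPUL
 * (STS FPUL,R1), while code 0x426a masks to 0x406a with bit 4 clear, so
 * FPSCR = Rn (LDS R2,FPSCR).
 */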
static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs)
{
if ((code & 0xf000) == 0xf000)
return id_fnmx(fregs, regs, code);
else
return id_sys(fregs, regs, code);
}
/**
* denormal_to_double - Convert a denormalized single in fpul to a double in fp_regs[]
*
* @fpu: Pointer to sh_fpu_soft structure
* @n: Index to FP register
*/
static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n+1] = dl;
}
}
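/*
 * Illustrative sketch (assumption, not part of the original source): the same
 * conversion as a pure function, with one worked example.  For the smallest
 * denormal single, fpul = 0x00000001 (2^-149), the loop shifts 23 times, so
 * the biased double exponent is (1023 - 126) - 23 = 874 and the fraction is
 * zero, giving the double bit pattern 0x36a0000000000000, which is again
 * 2^-149.
 */
#if 0	/* example only, not compiled */
static u64 denormal_single_to_double_bits(u32 x)
{
	int exp = 1023 - 126;
	u64 du;
	u32 dl;

	if (x == 0 || (x & 0x7f800000) != 0)
		return 0;			/* not a denormalized single */

	du = x & 0x80000000;			/* keep the sign bit */
	while ((x & 0x00800000) == 0) {		/* normalize the fraction */
		x <<= 1;
		exp--;
	}
	x &= 0x007fffff;			/* drop the implicit leading one */
	du |= ((u64)exp << 20) | (x >> 3);	/* high word: sign | exp | frac[22:3] */
	dl = x << 29;				/* low word: frac[2:0] in the top bits */
	return (du << 32) | dl;
}
#endif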
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int ieee_fpe_handler(struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
siginfo_t info;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf};
if (nib[0] == 0xb ||
(nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
regs->pr = regs->pc + 4;
if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (insn == 0x000b) { /* rts */
nextpc = regs->pr;
finsn = *(unsigned short *) (regs->pc + 2);
} else {
nextpc = regs->pc + 2;
finsn = insn;
}
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
/* FPU error */
denormal_to_double (&tsk->thread.xstate->softfpu,
(finsn >> 8) & 0xf);
tsk->thread.xstate->softfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
task_thread_info(tsk)->status |= TS_USEDFPU;
} else {
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = FPE_FLTINV;
info.si_addr = (void __user *)regs->pc;
force_sig_info(SIGFPE, &info, tsk);
}
regs->pc = nextpc;
return 1;
}
return 0;
}
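/*
 * Illustrative sketch (assumption, not part of the original source): how the
 * bra/bsr target above is derived.  The 12-bit displacement is sign-extended
 * by shifting it into the top of a 16-bit value and arithmetically shifting
 * back, then scaled by 2 (instructions are 2 bytes).  E.g. insn = 0xaffe
 * encodes disp = -2, so the target is pc + 4 + (-2) * 2 = pc.
 */
#if 0	/* example only, not compiled */
static unsigned long bra_target(unsigned long pc, unsigned short insn)
{
	short disp12 = (short)((insn & 0xfff) << 4) >> 4;	/* sign-extend 12 bits */

	return pc + 4 + disp12 * 2;
}
#endif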
asmlinkage void do_fpu_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
struct task_struct *tsk = current;
siginfo_t info;
if (ieee_fpe_handler(&regs))
return;
regs.pc += 2;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = FPE_FLTINV;
info.si_addr = (void __user *)regs.pc;
force_sig_info(SIGFPE, &info, tsk);
}
/**
* fpu_init - Initialize FPU registers
* @fpu: Pointer to software emulated FPU registers.
*/
static void fpu_init(struct sh_fpu_soft_struct *fpu)
{
int i;
fpu->fpscr = FPSCR_INIT;
fpu->fpul = 0;
for (i = 0; i < 16; i++) {
fpu->fp_regs[i] = 0;
fpu->xfp_regs[i]= 0;
}
}
/**
* do_fpu_inst - Handle reserved instructions for FPU emulation
* @inst: instruction code.
* @regs: registers on stack.
*/
int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
task_thread_info(tsk)->status |= TS_USEDFPU;
}
return fpu_emulate(inst, fpu, regs);
}
|
98,545,598,903,890 | /*
* Page fault handler for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 - 2009 Paul Mundt
*
* Based on linux/arch/i386/mm/fault.c:
* Copyright (C) 1995 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}
return ret;
}
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd += index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
return NULL;
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
return NULL;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k);
else {
/*
* The page tables are fully synchronised so there must
* be another reason for the fault. Return NULL here to
* signal that we have not taken care of the fault.
*/
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
return NULL;
}
return pmd_k;
}
/*
* Handle a fault on the vmalloc or module mapping area
*/
static noinline int vmalloc_fault(unsigned long address)
{
pgd_t *pgd_k;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc/module/P3 area: */
if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
return -1;
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch..
*/
pgd_k = get_TTB();
pmd_k = vmalloc_sync_one(pgd_k, address);
if (!pmd_k)
return -1;
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
return address >= TASK_SIZE;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long vec;
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
int si_code;
int fault;
siginfo_t info;
tsk = current;
mm = tsk->mm;
si_code = SEGV_MAPERR;
vec = lookup_exception_vector();
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (vmalloc_fault(address) >= 0)
return;
if (notify_page_fault(regs, vec))
return;
goto bad_area_nosemaphore;
}
if (unlikely(notify_page_fault(regs, vec)))
return;
/* Only enable interrupts if they were on before the fault */
if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
if (writeaccess) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
if (handle_trapped_io(regs, address))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*
*/
bust_spinlocks(1);
if (oops_may_print()) {
unsigned long page;
if (address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel paging "
"request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
page = (unsigned long)get_TTB();
if (page) {
page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((__typeof__(page) *)
__va(page))[address >>
PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
}
}
die("Oops", regs, writeaccess);
bust_spinlocks(0);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
/*
* Called with interrupts disabled.
*/
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
/*
* We don't take page faults for P1, P2, and parts of P4; these
* are always mapped, whether it be due to legacy behaviour in
* 29-bit mode, or due to PMB configuration in 32-bit mode.
*/
if (address >= P3SEG && address < P3_ADDR_MAX) {
pgd = pgd_offset_k(address);
} else {
if (unlikely(address >= TASK_SIZE || !current->mm))
return 1;
pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
if (pud_none_or_clear_bad(pud))
return 1;
pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 1;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
return 1;
if (unlikely(writeaccess && !pte_write(entry)))
return 1;
if (writeaccess)
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
set_pte(pte, entry);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
/*
* SH-4 does not set MMUCR.RC to the corresponding TLB entry in
* the case of an initial page write exception, so we need to
* flush it in order to avoid potential TLB entry duplication.
*/
if (writeaccess == 2)
local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
update_mmu_cache(NULL, address, pte);
return 0;
}
| /*
* Page fault handler for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 - 2009 Paul Mundt
*
* Based on linux/arch/i386/mm/fault.c:
* Copyright (C) 1995 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
int ret = 0;
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, trap))
ret = 1;
preempt_enable();
}
return ret;
}
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd += index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
return NULL;
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
return NULL;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k);
else {
/*
* The page tables are fully synchronised so there must
* be another reason for the fault. Return NULL here to
* signal that we have not taken care of the fault.
*/
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
return NULL;
}
return pmd_k;
}
/*
* Handle a fault on the vmalloc or module mapping area
*/
static noinline int vmalloc_fault(unsigned long address)
{
pgd_t *pgd_k;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc/module/P3 area: */
if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
return -1;
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch..
*/
pgd_k = get_TTB();
pmd_k = vmalloc_sync_one(pgd_k, address);
if (!pmd_k)
return -1;
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
return address >= TASK_SIZE;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long vec;
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
int si_code;
int fault;
siginfo_t info;
tsk = current;
mm = tsk->mm;
si_code = SEGV_MAPERR;
vec = lookup_exception_vector();
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (vmalloc_fault(address) >= 0)
return;
if (notify_page_fault(regs, vec))
return;
goto bad_area_nosemaphore;
}
if (unlikely(notify_page_fault(regs, vec)))
return;
/* Only enable interrupts if they were on before the fault */
if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (in_atomic() || !mm)
goto no_context;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
if (writeaccess) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
if (handle_trapped_io(regs, address))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*
*/
bust_spinlocks(1);
if (oops_may_print()) {
unsigned long page;
if (address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel paging "
"request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
page = (unsigned long)get_TTB();
if (page) {
page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
printk(KERN_ALERT "*pde = %08lx\n", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((__typeof__(page) *)
__va(page))[address >>
PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
}
}
die("Oops", regs, writeaccess);
bust_spinlocks(0);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)address;
force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
/*
* Called with interrupts disabled.
*/
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
/*
* We don't take page faults for P1, P2, and parts of P4; these
* are always mapped, whether it be due to legacy behaviour in
* 29-bit mode, or due to PMB configuration in 32-bit mode.
*/
if (address >= P3SEG && address < P3_ADDR_MAX) {
pgd = pgd_offset_k(address);
} else {
if (unlikely(address >= TASK_SIZE || !current->mm))
return 1;
pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
if (pud_none_or_clear_bad(pud))
return 1;
pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 1;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (unlikely(pte_none(entry) || pte_not_present(entry)))
return 1;
if (unlikely(writeaccess && !pte_write(entry)))
return 1;
if (writeaccess)
entry = pte_mkdirty(entry);
entry = pte_mkyoung(entry);
set_pte(pte, entry);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
/*
* SH-4 does not set MMUCR.RC to the corresponding TLB entry in
* the case of an initial page write exception, so we need to
* flush it in order to avoid potential TLB entry duplication.
*/
if (writeaccess == 2)
local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
update_mmu_cache(NULL, address, pte);
return 0;
}
|
218,343,022,370,686 | /*
* arch/sh/mm/tlb-flush_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
* Copyright (C) 2003 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
extern void die(const char *,struct pt_regs *,long);
#define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
static inline void print_prots(pgprot_t prot)
{
printk("prot is 0x%016llx\n",pgprot_val(prot));
printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}
static inline void print_vma(struct vm_area_struct *vma)
{
printk("vma start 0x%08lx\n", vma->vm_start);
printk("vma end 0x%08lx\n", vma->vm_end);
print_prots(vma->vm_page_prot);
printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
static inline void print_task(struct task_struct *tsk)
{
printk("Task pid %d\n", task_pid_nr(tsk));
}
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
pgd_t *dir;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
dir = pgd_offset(mm, address);
if (pgd_none(*dir))
return NULL;
pud = pud_offset(dir, address);
if (pud_none(*pud))
return NULL;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (pte_none(entry) || !pte_present(entry))
return NULL;
return pte;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
unsigned long textaccess, unsigned long address)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
const struct exception_table_entry *fixup;
pte_t *pte;
int fault;
/* SIM
* Note this is now called with interrupts still disabled
* This is to cope with being called for a missing IO port
* address with interrupts disabled. This should be fixed as
* soon as we have a better 'fast path' miss handler.
*
* Plus take care how you try and debug this stuff.
* For example, writing debug data to a port which you
* have just faulted on is not going to work.
*/
tsk = current;
mm = tsk->mm;
/* Not an IO address, so reenable interrupts */
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
/* TLB misses upon some cache flushes get done under cli() */
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;
}
if (vma->vm_start <= address) {
goto good_area;
}
if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
print_vma(vma);
#endif
goto bad_area;
}
if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (textaccess) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
} else {
if (writeaccess) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & VM_READ))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
/* If we get here, the page fault has been handled. Do the TLB refill
now from the newly-setup PTE, to avoid having to fault again right
away on the same instruction. */
pte = lookup_pte (mm, address);
if (!pte) {
/* From empirical evidence, we can get here, due to
!pte_present(pte). (e.g. if a swap-in occurs, and the page
is swapped back out again before the process that wanted it
gets rescheduled?) */
goto no_pte;
}
__do_tlb_refill(address, textaccess, pte);
no_pte:
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
#ifdef DEBUG_FAULT
printk("fault:bad area\n");
#endif
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
static int count=0;
siginfo_t info;
if (count < 4) {
/* This is really to help debug faults when starting
* usermode, so only need a few */
count++;
printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
address, task_pid_nr(current), current->comm,
(unsigned long) regs->pc);
#if 0
show_regs(regs);
#endif
}
if (is_global_init(tsk)) {
panic("INIT had user mode bad_area\n");
}
tsk->thread.address = address;
tsk->thread.error_code = writeaccess;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
#ifdef DEBUG_FAULT
printk("fault:No context\n");
#endif
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*
*/
if (address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
die("Oops", regs, writeaccess);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
printk("fault:Do sigbus\n");
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
tsk->thread.address = address;
tsk->thread.error_code = writeaccess;
tsk->thread.trap_no = 14;
force_sig(SIGBUS, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
unsigned long long match, pteh=0, lpage;
unsigned long tlb;
/*
* Sign-extend based on neff.
*/
lpage = neff_sign_extend(page);
match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
match |= lpage;
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
unsigned long flags;
if (vma->vm_mm) {
page &= PAGE_MASK;
local_irq_save(flags);
local_flush_tlb_one(get_asid(), page);
local_irq_restore(flags);
}
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long flags;
unsigned long long match, pteh=0, pteh_epn, pteh_low;
unsigned long tlb;
unsigned int cpu = smp_processor_id();
struct mm_struct *mm;
mm = vma->vm_mm;
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
start &= PAGE_MASK;
end &= PAGE_MASK;
match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
/* Flush ITLB */
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
/* Flush DTLB */
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
cpu_context(cpu, mm) = NO_CONTEXT;
if (mm == current->mm)
activate_context(mm, cpu);
local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
/* Invalidate all, including shared pages, excluding fixed TLBs */
unsigned long flags, tlb;
local_irq_save(flags);
/* Flush each ITLB entry */
for_each_itlb_entry(tlb)
__flush_tlb_slot(tlb);
/* Flush each DTLB entry */
for_each_dtlb_entry(tlb)
__flush_tlb_slot(tlb);
local_irq_restore(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
/* FIXME: Optimize this later.. */
flush_tlb_all();
}
void __flush_tlb_global(void)
{
flush_tlb_all();
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
| /*
* arch/sh/mm/tlb-flush_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
* Copyright (C) 2003 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
extern void die(const char *,struct pt_regs *,long);
#define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)
static inline void print_prots(pgprot_t prot)
{
printk("prot is 0x%016llx\n",pgprot_val(prot));
printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));
}
static inline void print_vma(struct vm_area_struct *vma)
{
printk("vma start 0x%08lx\n", vma->vm_start);
printk("vma end 0x%08lx\n", vma->vm_end);
print_prots(vma->vm_page_prot);
printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
static inline void print_task(struct task_struct *tsk)
{
printk("Task pid %d\n", task_pid_nr(tsk));
}
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
pgd_t *dir;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
dir = pgd_offset(mm, address);
if (pgd_none(*dir))
return NULL;
pud = pud_offset(dir, address);
if (pud_none(*pud))
return NULL;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (pte_none(entry) || !pte_present(entry))
return NULL;
return pte;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
unsigned long textaccess, unsigned long address)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct * vma;
const struct exception_table_entry *fixup;
pte_t *pte;
int fault;
/* SIM
* Note this is now called with interrupts still disabled
* This is to cope with being called for a missing IO port
* address with interrupts disabled. This should be fixed as
* soon as we have a better 'fast path' miss handler.
*
* Plus take care how you try and debug this stuff.
* For example, writing debug data to a port which you
* have just faulted on is not going to work.
*/
tsk = current;
mm = tsk->mm;
/* Not an IO address, so reenable interrupts */
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
/* TLB misses upon some cache flushes get done under cli() */
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;
}
if (vma->vm_start <= address) {
goto good_area;
}
if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
print_vma(vma);
#endif
goto bad_area;
}
if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
print_task(tsk);
printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n",
__func__, __LINE__,
address,regs->pc,textaccess,writeaccess);
show_regs(regs);
#endif
goto bad_area;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (textaccess) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
} else {
if (writeaccess) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & VM_READ))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
/* If we get here, the page fault has been handled. Do the TLB refill
now from the newly-setup PTE, to avoid having to fault again right
away on the same instruction. */
pte = lookup_pte (mm, address);
if (!pte) {
/* From empirical evidence, we can get here, due to
!pte_present(pte). (e.g. if a swap-in occurs, and the page
is swapped back out again before the process that wanted it
gets rescheduled?) */
goto no_pte;
}
__do_tlb_refill(address, textaccess, pte);
no_pte:
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
#ifdef DEBUG_FAULT
printk("fault:bad area\n");
#endif
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
static int count=0;
siginfo_t info;
if (count < 4) {
/* This is really to help debug faults when starting
* usermode, so only need a few */
count++;
printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
address, task_pid_nr(current), current->comm,
(unsigned long) regs->pc);
#if 0
show_regs(regs);
#endif
}
if (is_global_init(tsk)) {
panic("INIT had user mode bad_area\n");
}
tsk->thread.address = address;
tsk->thread.error_code = writeaccess;
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
#ifdef DEBUG_FAULT
printk("fault:No context\n");
#endif
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*
*/
if (address < PAGE_SIZE)
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
die("Oops", regs, writeaccess);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
printk("fault:Do sigbus\n");
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
tsk->thread.address = address;
tsk->thread.error_code = writeaccess;
tsk->thread.trap_no = 14;
force_sig(SIGBUS, tsk);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
}
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
unsigned long long match, pteh=0, lpage;
unsigned long tlb;
/*
* Sign-extend based on neff.
*/
lpage = neff_sign_extend(page);
match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
match |= lpage;
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
unsigned long flags;
if (vma->vm_mm) {
page &= PAGE_MASK;
local_irq_save(flags);
local_flush_tlb_one(get_asid(), page);
local_irq_restore(flags);
}
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long flags;
unsigned long long match, pteh=0, pteh_epn, pteh_low;
unsigned long tlb;
unsigned int cpu = smp_processor_id();
struct mm_struct *mm;
mm = vma->vm_mm;
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
start &= PAGE_MASK;
end &= PAGE_MASK;
match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
/* Flush ITLB */
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
/* Flush DTLB */
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
cpu_context(cpu, mm) = NO_CONTEXT;
if (mm == current->mm)
activate_context(mm, cpu);
local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
/* Invalidate all, including shared pages, excluding fixed TLBs */
unsigned long flags, tlb;
local_irq_save(flags);
/* Flush each ITLB entry */
for_each_itlb_entry(tlb)
__flush_tlb_slot(tlb);
/* Flush each DTLB entry */
for_each_dtlb_entry(tlb)
__flush_tlb_slot(tlb);
local_irq_restore(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
/* FIXME: Optimize this later.. */
flush_tlb_all();
}
void __flush_tlb_global(void)
{
flush_tlb_all();
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}
|
66,494,810,252,198 | /* Performance event support for sparc64.
*
* Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
*
* This code is based almost entirely upon the x86 perf event
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include "kernel.h"
#include "kstack.h"
/* Sparc64 chips have two performance counters, 32-bits each, with
* overflow interrupts generated on transition from 0xffffffff to 0.
* The counters are accessed in one go using a 64-bit register.
*
* Both counters are controlled using a single control register. The
* only way to stop all sampling is to clear all of the context (user,
* supervisor, hypervisor) sampling enable bits. But these bits apply
* to both counters, thus the two counters can't be enabled/disabled
* individually.
*
* The control register has two event fields, one for each of the two
* counters. It's thus nearly impossible to have one counter going
* while keeping the other one stopped. Therefore it is possible to
* get overflow interrupts for counters not currently "in use" and
* that condition must be checked in the overflow interrupt handler.
*
* So we use a hack, in that we program inactive counters with the
* "sw_count0" and "sw_count1" events. These count how many times
* the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
* unusual way to encode a NOP and therefore will not trigger in
* normal code.
*/
#define MAX_HWEVENTS 2
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
#define PIC_NO_INDEX -1
struct cpu_hw_events {
/* Number of events currently scheduled onto this cpu.
* This tells how many entries in the arrays below
* are valid.
*/
int n_events;
/* Number of new events added since the last hw_perf_disable().
* This works because the perf event layer always adds new
* events inside of a perf_{disable,enable}() sequence.
*/
int n_added;
/* Array of events currently scheduled on this cpu. */
struct perf_event *event[MAX_HWEVENTS];
/* Array of encoded longs, specifying the %pcr register
* encoding and the mask of PIC counters this event can
* be scheduled on. See perf_event_encode() et al.
*/
unsigned long events[MAX_HWEVENTS];
/* The current counter index assigned to an event. When the
* event hasn't been programmed into the cpu yet, this will
* hold PIC_NO_INDEX. The event->hw.idx value tells us where
* we ought to schedule the event.
*/
int current_idx[MAX_HWEVENTS];
/* Software copy of %pcr register on this cpu. */
u64 pcr;
/* Enabled/disabled state. */
int enabled;
unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
* counter event. In particular it gives the encoding as well as
* a mask telling which counters the event can be measured on.
*/
struct perf_event_map {
u16 encoding;
u8 pic_mask;
#define PIC_NONE 0x00
#define PIC_UPPER 0x01
#define PIC_LOWER 0x02
};
/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}
static u8 perf_event_get_msk(unsigned long val)
{
return val & 0xff;
}
static u64 perf_event_get_enc(unsigned long val)
{
return val >> 16;
}
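/*
 * Illustrative sketch (assumption, not part of the original source): a round
 * trip of the packing above.  For { .encoding = 0x0009, .pic_mask = PIC_LOWER }
 * the packed long is (0x0009 << 16) | 0x02 = 0x90002, and the two accessors
 * recover 0x02 and 0x0009 respectively.
 */
#if 0	/* example only, not compiled */
static void perf_event_encode_example(void)
{
	struct perf_event_map map = { .encoding = 0x0009, .pic_mask = PIC_LOWER };
	unsigned long packed = perf_event_encode(&map);	/* 0x90002 */
	u8 msk = perf_event_get_msk(packed);		/* PIC_LOWER (0x02) */
	u64 enc = perf_event_get_enc(packed);		/* 0x0009 */
}
#endif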
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_OP_UNSUPPORTED 0xfffe
#define CACHE_OP_NONSENSE 0xffff
typedef struct perf_event_map cache_map_t
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
const struct perf_event_map *(*event_map)(int);
const cache_map_t *cache_map;
int max_events;
int upper_shift;
int lower_shift;
int event_mask;
int hv_bit;
int irq_bit;
int upper_nop;
int lower_nop;
};
static const struct perf_event_map ultra3_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu ultra3_pmu = {
.event_map = ultra3_event_map,
.cache_map = &ultra3_cache_map,
.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
.upper_shift = 11,
.lower_shift = 4,
.event_mask = 0x3f,
.upper_nop = 0x1c,
.lower_nop = 0x14,
};
/* Niagara1 is very limited. The upper PIC is hard-locked to count
* only instructions, so it is free running which creates all kinds of
* problems. Some hardware designs make one wonder if the creator
* even looked at how this stuff gets used by software.
*/
static const struct perf_event_map niagara1_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
static const struct perf_event_map *niagara1_event_map(int event_id)
{
return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara1_pmu = {
.event_map = niagara1_event_map,
.cache_map = &niagara1_cache_map,
.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
.upper_shift = 0,
.lower_shift = 4,
.event_mask = 0x7,
.upper_nop = 0x0,
.lower_nop = 0x0,
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event_id)
{
return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara2_pmu = {
.event_map = niagara2_event_map,
.cache_map = &niagara2_cache_map,
.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
.upper_shift = 19,
.lower_shift = 6,
.event_mask = 0xfff,
.hv_bit = 0x8,
.irq_bit = 0x30,
.upper_nop = 0x220,
.lower_nop = 0x220,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
if (idx == PIC_UPPER_INDEX)
event_id <<= sparc_pmu->upper_shift;
else
event_id <<= sparc_pmu->lower_shift;
return event_id;
}
static u64 mask_for_index(int idx)
{
return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
return event_encoding(idx == PIC_UPPER_INDEX ?
sparc_pmu->upper_nop :
sparc_pmu->lower_nop, idx);
}
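/*
 * Illustrative sketch (assumption, not part of the original source), assuming
 * sparc_pmu points at ultra3_pmu (upper_shift = 11, lower_shift = 4,
 * event_mask = 0x3f, upper_nop = 0x1c): the two event fields occupy disjoint
 * bit ranges of the single %pcr register, and an idle counter is parked on
 * the "sw_count" encoding that normal code never triggers.
 */
#if 0	/* example only, not compiled */
static void ultra3_pcr_layout_example(void)
{
	u64 upper_mask = mask_for_index(PIC_UPPER_INDEX);	/* 0x3f << 11 = 0x1f800 */
	u64 lower_mask = mask_for_index(PIC_LOWER_INDEX);	/* 0x3f << 4  = 0x003f0 */
	u64 upper_idle = nop_for_index(PIC_UPPER_INDEX);	/* 0x1c << 11 = 0x0e000 */
}
#endif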
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 val, mask = mask_for_index(idx);
val = cpuc->pcr;
val &= ~mask;
val |= hwc->config;
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 mask = mask_for_index(idx);
u64 nop = nop_for_index(idx);
u64 val;
val = cpuc->pcr;
val &= ~mask;
val |= nop;
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static u32 read_pmc(int idx)
{
u64 val;
read_pic(val);
if (idx == PIC_UPPER_INDEX)
val >>= 32;
return val & 0xffffffff;
}
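/* Both 32-bit counters live in the single 64-bit PIC register, so
* writing one counter is a read-modify-write of the whole register.
*/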
static void write_pmc(int idx, u64 val)
{
u64 shift, mask, pic;
shift = 0;
if (idx == PIC_UPPER_INDEX)
shift = 32;
mask = ((u64) 0xffffffff) << shift;
val <<= shift;
read_pic(pic);
pic &= ~mask;
pic |= val;
write_pic(pic);
}
static u64 sparc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
s64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
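/* The counters are 32 bits wide. Shifting both raw values into the
* top 32 bits and arithmetically shifting the difference back down
* keeps only the counter bits, so a wrap between the two reads is
* still accounted for correctly.
*/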
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
static int sparc_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > MAX_PERIOD)
left = MAX_PERIOD;
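/* Program the counter with -left so that it wraps from 0xffffffff
* to 0 (raising the overflow interrupt) after "left" more events.
*/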
local64_set(&hwc->prev_count, (u64)-left);
write_pmc(idx, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
/* If performance event entries have been added, move existing
* events around (if necessary) and then assign new entries to
* counters.
*/
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
int i;
if (!cpuc->n_added)
goto out;
/* Read in the counters which are moving. */
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
if (cpuc->current_idx[i] != PIC_NO_INDEX &&
cpuc->current_idx[i] != cp->hw.idx) {
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
enc = perf_event_get_enc(cpuc->events[i]);
pcr &= ~mask_for_index(idx);
if (hwc->state & PERF_HES_STOPPED)
pcr |= nop_for_index(idx);
else
pcr |= event_encoding(enc, idx);
}
out:
return pcr;
}
static void sparc_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 pcr;
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
pcr = cpuc->pcr;
if (!cpuc->n_events) {
pcr = 0;
} else {
pcr = maybe_change_configuration(cpuc, pcr);
/* We require that all of the events have the same
* configuration, so just fetch the settings from the
* first entry.
*/
cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
}
pcr_ops->write(cpuc->pcr);
}
static void sparc_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
val = cpuc->pcr;
val &= ~(PCR_UTRACE | PCR_STRACE |
sparc_pmu->hv_bit | sparc_pmu->irq_bit);
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static int active_event_index(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
int i;
for (i = 0; i < cpuc->n_events; i++) {
if (cpuc->event[i] == event)
break;
}
BUG_ON(i == cpuc->n_events);
return cpuc->current_idx[i];
}
static void sparc_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
sparc_perf_event_set_period(event, &event->hw, idx);
}
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (!(event->hw.state & PERF_HES_STOPPED)) {
sparc_pmu_disable_event(cpuc, &event->hw, idx);
event->hw.state |= PERF_HES_STOPPED;
}
if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
sparc_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
int i;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
/* Absorb the final count and turn off the
* event.
*/
sparc_pmu_stop(event, PERF_EF_UPDATE);
/* Shift remaining entries down into
* the existing slot.
*/
while (++i < cpuc->n_events) {
cpuc->event[i - 1] = cpuc->event[i];
cpuc->events[i - 1] = cpuc->events[i];
cpuc->current_idx[i - 1] =
cpuc->current_idx[i];
}
perf_event_update_userpage(event);
cpuc->n_events--;
break;
}
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
struct hw_perf_event *hwc = &event->hw;
sparc_perf_event_update(event, hwc, idx);
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
stop_nmi_watchdog(NULL);
cpuc->pcr = pcr_ops->read();
}
void perf_event_grab_pmc(void)
{
if (atomic_inc_not_zero(&active_events))
return;
mutex_lock(&pmc_grab_mutex);
if (atomic_read(&active_events) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
atomic_inc(&active_events);
}
mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct perf_event_map *pmap;
if (!sparc_pmu->cache_map)
return ERR_PTR(-ENOENT);
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
if (pmap->encoding == CACHE_OP_UNSUPPORTED)
return ERR_PTR(-ENOENT);
if (pmap->encoding == CACHE_OP_NONSENSE)
return ERR_PTR(-EINVAL);
return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
* the same time. This is simplified by the fact that we only
* need to support 2 simultaneous HW events.
*
* As a side effect, the evts[]->hw.idx values will be assigned
* on success. These are pending indexes. When the events are
* actually programmed into the chip, these values will propagate
* to the per-cpu cpuc->current_idx[] slots, see the code in
* maybe_change_configuration() for details.
*/
static int sparc_check_constraints(struct perf_event **evts,
unsigned long *events, int n_ev)
{
u8 msk0 = 0, msk1 = 0;
int idx0 = 0;
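/* idx0 is the counter picked for evts[0]: 0 means PIC_UPPER_INDEX,
* 1 means PIC_LOWER_INDEX. evts[1], if present, gets the other one.
*/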
/* This case is possible when we are invoked from
* hw_perf_group_sched_in().
*/
if (!n_ev)
return 0;
if (n_ev > MAX_HWEVENTS)
return -1;
msk0 = perf_event_get_msk(events[0]);
if (n_ev == 1) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
BUG_ON(n_ev != 2);
msk1 = perf_event_get_msk(events[1]);
/* If both events can go on any counter, OK. */
if (msk0 == (PIC_UPPER | PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER))
goto success;
/* If one event is limited to a specific counter,
* and the other can go on both, OK.
*/
if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
msk0 == (PIC_UPPER | PIC_LOWER)) {
if (msk1 & PIC_UPPER)
idx0 = 1;
goto success;
}
/* If the events are fixed to different counters, OK. */
if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
(msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
/* Otherwise, there is a conflict. */
return -1;
success:
evts[0]->hw.idx = idx0;
if (n_ev == 2)
evts[1]->hw.idx = idx0 ^ 1;
return 0;
}
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
struct perf_event *event;
int i, n, first;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; i++) {
event = evts[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
return 0;
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *evts[], unsigned long *events,
int *current_idx)
{
struct perf_event *event;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
evts[n] = group;
events[n] = group->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
evts[n] = event;
events[n] = event->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
}
return n;
}
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= MAX_HWEVENTS)
goto out;
cpuc->event[n0] = event;
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
event->hw.state = PERF_HES_UPTODATE;
if (!(ef_flags & PERF_EF_START))
event->hw.state |= PERF_HES_STOPPED;
/*
* If a group event scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
nocheck:
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
static int sparc_pmu_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct perf_event *evts[MAX_HWEVENTS];
struct hw_perf_event *hwc = &event->hw;
unsigned long events[MAX_HWEVENTS];
int current_idx_dmy[MAX_HWEVENTS];
const struct perf_event_map *pmap;
int n;
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
switch (attr->type) {
case PERF_TYPE_HARDWARE:
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
pmap = sparc_pmu->event_map(attr->config);
break;
case PERF_TYPE_HW_CACHE:
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
break;
case PERF_TYPE_RAW:
pmap = NULL;
break;
default:
return -ENOENT;
}
if (pmap) {
hwc->event_base = perf_event_encode(pmap);
} else {
/*
* User gives us "(encoding << 16) | pic_mask" for
* PERF_TYPE_RAW events.
*/
hwc->event_base = attr->config;
}
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
hwc->config_base |= PCR_UTRACE;
if (!attr->exclude_kernel)
hwc->config_base |= PCR_STRACE;
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
MAX_HWEVENTS - 1,
evts, events, current_idx_dmy);
if (n < 0)
return -EINVAL;
}
events[n] = hwc->event_base;
evts[n] = event;
if (check_excludes(evts, n, 1))
return -EINVAL;
if (sparc_check_constraints(evts, events, n + 1))
return -EINVAL;
hwc->idx = PIC_NO_INDEX;
/* Try to do all error checking before this point, as unwinding
* state after grabbing the PMC is difficult.
*/
perf_event_grab_pmc();
event->destroy = hw_perf_event_destroy;
if (!hwc->sample_period) {
hwc->sample_period = MAX_PERIOD;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test; it will be performed at commit time
*/
static void sparc_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
cpuc = &__get_cpu_var(cpu_hw_events);
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n))
return -EAGAIN;
cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
static struct pmu pmu = {
.pmu_enable = sparc_pmu_enable,
.pmu_disable = sparc_pmu_disable,
.event_init = sparc_pmu_event_init,
.add = sparc_pmu_add,
.del = sparc_pmu_del,
.start = sparc_pmu_start,
.stop = sparc_pmu_stop,
.read = sparc_pmu_read,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
void perf_event_print_debug(void)
{
unsigned long flags;
u64 pcr, pic;
int cpu;
if (!sparc_pmu)
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = pcr_ops->read();
read_pic(pic);
pr_info("\n");
pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
cpu, pcr, pic);
local_irq_restore(flags);
}
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int i;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
regs = args->regs;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/* If the PMU has the TOE IRQ enable bits, we need to do a
* dummy write to the %pcr to clear the overflow bits and thus
* the interrupt.
*
* Do this before we peek at the counters to determine
* overflow so we don't lose any events.
*/
if (sparc_pmu->irq_bit)
pcr_ops->write(cpuc->pcr);
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *event = cpuc->event[i];
int idx = cpuc->current_idx[i];
struct hw_perf_event *hwc;
u64 val;
hwc = &event->hw;
val = sparc_perf_event_update(event, hwc, idx);
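/* Counters are programmed with -left (see
* sparc_perf_event_set_period); while bit 31 is still set the
* counter has not wrapped to zero, so this event did not overflow.
*/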
if (val & (1ULL << 31))
continue;
data.period = event->hw.last_period;
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 1, &data, regs))
sparc_pmu_stop(event, 0);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
if (!strcmp(sparc_pmu_type, "ultra3") ||
!strcmp(sparc_pmu_type, "ultra3+") ||
!strcmp(sparc_pmu_type, "ultra3i") ||
!strcmp(sparc_pmu_type, "ultra4+")) {
sparc_pmu = &ultra3_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara")) {
sparc_pmu = &niagara1_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara2")) {
sparc_pmu = &niagara2_pmu;
return true;
}
return false;
}
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return 0;
}
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
register_die_notifier(&perf_event_nmi_notifier);
return 0;
}
early_initcall(init_hw_perf_events);
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
stack_trace_flush();
perf_callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS;
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(current_thread_info(), fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(current_thread_info(), regs)) {
if (user_mode(regs))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
int index = current->curr_ret_stack;
if (current->ret_stack && index >= graph) {
pc = current->ret_stack[index - graph].ret;
perf_callchain_store(entry, pc);
graph++;
}
}
#endif
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ufp;
perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
struct sparc_stackf *usf, sf;
unsigned long pc;
usf = (struct sparc_stackf *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_32(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ufp;
perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
unsigned long pc;
usf = (struct sparc_stackf32 *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
flushw_user();
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
else
perf_callchain_user_64(entry, regs);
}
| /* Performance event support for sparc64.
*
* Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
*
* This code is based almost entirely upon the x86 perf event
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include "kernel.h"
#include "kstack.h"
/* Sparc64 chips have two performance counters, 32-bits each, with
* overflow interrupts generated on transition from 0xffffffff to 0.
* The counters are accessed in one go using a 64-bit register.
*
* Both counters are controlled using a single control register. The
* only way to stop all sampling is to clear all of the context (user,
* supervisor, hypervisor) sampling enable bits. But these bits apply
* to both counters, thus the two counters can't be enabled/disabled
* individually.
*
* The control register has two event fields, one for each of the two
* counters. It's thus nearly impossible to have one counter going
* while keeping the other one stopped. Therefore it is possible to
* get overflow interrupts for counters not currently "in use" and
* that condition must be checked in the overflow interrupt handler.
*
* So we use a hack, in that we program inactive counters with the
* "sw_count0" and "sw_count1" events. These count how many times
* the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
* unusual way to encode a NOP and therefore will not trigger in
* normal code.
*/
#define MAX_HWEVENTS 2
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
#define PIC_NO_INDEX -1
struct cpu_hw_events {
/* Number of events currently scheduled onto this cpu.
* This tells how many entries in the arrays below
* are valid.
*/
int n_events;
/* Number of new events added since the last hw_perf_disable().
* This works because the perf event layer always adds new
* events inside of a perf_{disable,enable}() sequence.
*/
int n_added;
/* Array of events currently scheduled on this cpu. */
struct perf_event *event[MAX_HWEVENTS];
/* Array of encoded longs, specifying the %pcr register
* encoding and the mask of PIC counters this event can
* be scheduled on. See perf_event_encode() et al.
*/
unsigned long events[MAX_HWEVENTS];
/* The current counter index assigned to an event. When the
* event hasn't been programmed into the cpu yet, this will
* hold PIC_NO_INDEX. The event->hw.idx value tells us where
* we ought to schedule the event.
*/
int current_idx[MAX_HWEVENTS];
/* Software copy of %pcr register on this cpu. */
u64 pcr;
/* Enable/disable state. */
int enabled;
unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
/* An event map describes the characteristics of a performance
* counter event. In particular it gives the encoding as well as
* a mask telling which counters the event can be measured on.
*/
struct perf_event_map {
u16 encoding;
u8 pic_mask;
#define PIC_NONE 0x00
#define PIC_UPPER 0x01
#define PIC_LOWER 0x02
};
/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}
static u8 perf_event_get_msk(unsigned long val)
{
return val & 0xff;
}
static u64 perf_event_get_enc(unsigned long val)
{
return val >> 16;
}
#define C(x) PERF_COUNT_HW_CACHE_##x
#define CACHE_OP_UNSUPPORTED 0xfffe
#define CACHE_OP_NONSENSE 0xffff
typedef struct perf_event_map cache_map_t
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
const struct perf_event_map *(*event_map)(int);
const cache_map_t *cache_map;
int max_events;
int upper_shift;
int lower_shift;
int event_mask;
int hv_bit;
int irq_bit;
int upper_nop;
int lower_nop;
};
static const struct perf_event_map ultra3_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
static const struct perf_event_map *ultra3_event_map(int event_id)
{
return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu ultra3_pmu = {
.event_map = ultra3_event_map,
.cache_map = &ultra3_cache_map,
.max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
.upper_shift = 11,
.lower_shift = 4,
.event_mask = 0x3f,
.upper_nop = 0x1c,
.lower_nop = 0x14,
};
/* Niagara1 is very limited. The upper PIC is hard-locked to count
* only instructions, so it is free-running, which creates all kinds of
* problems. Some hardware designs make one wonder if the creator
* even looked at how this stuff gets used by software.
*/
static const struct perf_event_map niagara1_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};
static const struct perf_event_map *niagara1_event_map(int event_id)
{
return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara1_pmu = {
.event_map = niagara1_event_map,
.cache_map = &niagara1_cache_map,
.max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
.upper_shift = 0,
.lower_shift = 4,
.event_mask = 0x7,
.upper_nop = 0x0,
.lower_nop = 0x0,
};
static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
static const struct perf_event_map *niagara2_event_map(int event_id)
{
return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
},
},
};
static const struct sparc_pmu niagara2_pmu = {
.event_map = niagara2_event_map,
.cache_map = &niagara2_cache_map,
.max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
.upper_shift = 19,
.lower_shift = 6,
.event_mask = 0xfff,
.hv_bit = 0x8,
.irq_bit = 0x30,
.upper_nop = 0x220,
.lower_nop = 0x220,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
{
if (idx == PIC_UPPER_INDEX)
event_id <<= sparc_pmu->upper_shift;
else
event_id <<= sparc_pmu->lower_shift;
return event_id;
}
static u64 mask_for_index(int idx)
{
return event_encoding(sparc_pmu->event_mask, idx);
}
static u64 nop_for_index(int idx)
{
return event_encoding(idx == PIC_UPPER_INDEX ?
sparc_pmu->upper_nop :
sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 val, mask = mask_for_index(idx);
val = cpuc->pcr;
val &= ~mask;
val |= hwc->config;
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
u64 mask = mask_for_index(idx);
u64 nop = nop_for_index(idx);
u64 val;
val = cpuc->pcr;
val &= ~mask;
val |= nop;
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static u32 read_pmc(int idx)
{
u64 val;
read_pic(val);
if (idx == PIC_UPPER_INDEX)
val >>= 32;
return val & 0xffffffff;
}
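/* Both 32-bit counters live in the single 64-bit PIC register, so
* writing one counter is a read-modify-write of the whole register.
*/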
static void write_pmc(int idx, u64 val)
{
u64 shift, mask, pic;
shift = 0;
if (idx == PIC_UPPER_INDEX)
shift = 32;
mask = ((u64) 0xffffffff) << shift;
val <<= shift;
read_pic(pic);
pic &= ~mask;
pic |= val;
write_pic(pic);
}
static u64 sparc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
s64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
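/* The counters are 32 bits wide. Shifting both raw values into the
* top 32 bits and arithmetically shifting the difference back down
* keeps only the counter bits, so a wrap between the two reads is
* still accounted for correctly.
*/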
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
static int sparc_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > MAX_PERIOD)
left = MAX_PERIOD;
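/* Program the counter with -left so that it wraps from 0xffffffff
* to 0 (raising the overflow interrupt) after "left" more events.
*/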
local64_set(&hwc->prev_count, (u64)-left);
write_pmc(idx, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
/* If performance event entries have been added, move existing
* events around (if necessary) and then assign new entries to
* counters.
*/
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
int i;
if (!cpuc->n_added)
goto out;
/* Read in the counters which are moving. */
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
if (cpuc->current_idx[i] != PIC_NO_INDEX &&
cpuc->current_idx[i] != cp->hw.idx) {
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
enc = perf_event_get_enc(cpuc->events[i]);
pcr &= ~mask_for_index(idx);
if (hwc->state & PERF_HES_STOPPED)
pcr |= nop_for_index(idx);
else
pcr |= event_encoding(enc, idx);
}
out:
return pcr;
}
static void sparc_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 pcr;
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
pcr = cpuc->pcr;
if (!cpuc->n_events) {
pcr = 0;
} else {
pcr = maybe_change_configuration(cpuc, pcr);
/* We require that all of the events have the same
* configuration, so just fetch the settings from the
* first entry.
*/
cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
}
pcr_ops->write(cpuc->pcr);
}
static void sparc_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
val = cpuc->pcr;
val &= ~(PCR_UTRACE | PCR_STRACE |
sparc_pmu->hv_bit | sparc_pmu->irq_bit);
cpuc->pcr = val;
pcr_ops->write(cpuc->pcr);
}
static int active_event_index(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
int i;
for (i = 0; i < cpuc->n_events; i++) {
if (cpuc->event[i] == event)
break;
}
BUG_ON(i == cpuc->n_events);
return cpuc->current_idx[i];
}
static void sparc_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
sparc_perf_event_set_period(event, &event->hw, idx);
}
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (!(event->hw.state & PERF_HES_STOPPED)) {
sparc_pmu_disable_event(cpuc, &event->hw, idx);
event->hw.state |= PERF_HES_STOPPED;
}
if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
sparc_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
int i;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
/* Absorb the final count and turn off the
* event.
*/
sparc_pmu_stop(event, PERF_EF_UPDATE);
/* Shift remaining entries down into
* the existing slot.
*/
while (++i < cpuc->n_events) {
cpuc->event[i - 1] = cpuc->event[i];
cpuc->events[i - 1] = cpuc->events[i];
cpuc->current_idx[i - 1] =
cpuc->current_idx[i];
}
perf_event_update_userpage(event);
cpuc->n_events--;
break;
}
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void sparc_pmu_read(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = active_event_index(cpuc, event);
struct hw_perf_event *hwc = &event->hw;
sparc_perf_event_update(event, hwc, idx);
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
stop_nmi_watchdog(NULL);
cpuc->pcr = pcr_ops->read();
}
void perf_event_grab_pmc(void)
{
if (atomic_inc_not_zero(&active_events))
return;
mutex_lock(&pmc_grab_mutex);
if (atomic_read(&active_events) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
atomic_inc(&active_events);
}
mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct perf_event_map *pmap;
if (!sparc_pmu->cache_map)
return ERR_PTR(-ENOENT);
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
if (pmap->encoding == CACHE_OP_UNSUPPORTED)
return ERR_PTR(-ENOENT);
if (pmap->encoding == CACHE_OP_NONSENSE)
return ERR_PTR(-EINVAL);
return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
* the same time. This is simplified by the fact that we only
* need to support 2 simultaneous HW events.
*
* As a side effect, the evts[]->hw.idx values will be assigned
* on success. These are pending indexes. When the events are
* actually programmed into the chip, these values will propagate
* to the per-cpu cpuc->current_idx[] slots, see the code in
* maybe_change_configuration() for details.
*/
static int sparc_check_constraints(struct perf_event **evts,
unsigned long *events, int n_ev)
{
u8 msk0 = 0, msk1 = 0;
int idx0 = 0;
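/* idx0 is the counter picked for evts[0]: 0 means PIC_UPPER_INDEX,
* 1 means PIC_LOWER_INDEX. evts[1], if present, gets the other one.
*/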
/* This case is possible when we are invoked from
* hw_perf_group_sched_in().
*/
if (!n_ev)
return 0;
if (n_ev > MAX_HWEVENTS)
return -1;
msk0 = perf_event_get_msk(events[0]);
if (n_ev == 1) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
BUG_ON(n_ev != 2);
msk1 = perf_event_get_msk(events[1]);
/* If both events can go on any counter, OK. */
if (msk0 == (PIC_UPPER | PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER))
goto success;
/* If one event is limited to a specific counter,
* and the other can go on both, OK.
*/
if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
msk1 == (PIC_UPPER | PIC_LOWER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
msk0 == (PIC_UPPER | PIC_LOWER)) {
if (msk1 & PIC_UPPER)
idx0 = 1;
goto success;
}
/* If the events are fixed to different counters, OK. */
if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
(msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
if (msk0 & PIC_LOWER)
idx0 = 1;
goto success;
}
/* Otherwise, there is a conflict. */
return -1;
success:
evts[0]->hw.idx = idx0;
if (n_ev == 2)
evts[1]->hw.idx = idx0 ^ 1;
return 0;
}
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
struct perf_event *event;
int i, n, first;
n = n_prev + n_new;
if (n <= 1)
return 0;
first = 1;
for (i = 0; i < n; i++) {
event = evts[i];
if (first) {
eu = event->attr.exclude_user;
ek = event->attr.exclude_kernel;
eh = event->attr.exclude_hv;
first = 0;
} else if (event->attr.exclude_user != eu ||
event->attr.exclude_kernel != ek ||
event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
return 0;
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *evts[], unsigned long *events,
int *current_idx)
{
struct perf_event *event;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
evts[n] = group;
events[n] = group->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
evts[n] = event;
events[n] = event->hw.event_base;
current_idx[n++] = PIC_NO_INDEX;
}
}
return n;
}
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= MAX_HWEVENTS)
goto out;
cpuc->event[n0] = event;
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
event->hw.state = PERF_HES_UPTODATE;
if (!(ef_flags & PERF_EF_START))
event->hw.state |= PERF_HES_STOPPED;
/*
* If a group event scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto nocheck;
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
nocheck:
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
static int sparc_pmu_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct perf_event *evts[MAX_HWEVENTS];
struct hw_perf_event *hwc = &event->hw;
unsigned long events[MAX_HWEVENTS];
int current_idx_dmy[MAX_HWEVENTS];
const struct perf_event_map *pmap;
int n;
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
switch (attr->type) {
case PERF_TYPE_HARDWARE:
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
pmap = sparc_pmu->event_map(attr->config);
break;
case PERF_TYPE_HW_CACHE:
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
break;
case PERF_TYPE_RAW:
pmap = NULL;
break;
default:
return -ENOENT;
}
if (pmap) {
hwc->event_base = perf_event_encode(pmap);
} else {
/*
* User gives us "(encoding << 16) | pic_mask" for
* PERF_TYPE_RAW events.
*/
hwc->event_base = attr->config;
}
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
hwc->config_base |= PCR_UTRACE;
if (!attr->exclude_kernel)
hwc->config_base |= PCR_STRACE;
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
MAX_HWEVENTS - 1,
evts, events, current_idx_dmy);
if (n < 0)
return -EINVAL;
}
events[n] = hwc->event_base;
evts[n] = event;
if (check_excludes(evts, n, 1))
return -EINVAL;
if (sparc_check_constraints(evts, events, n + 1))
return -EINVAL;
hwc->idx = PIC_NO_INDEX;
/* Try to do all error checking before this point, as unwinding
* state after grabbing the PMC is difficult.
*/
perf_event_grab_pmc();
event->destroy = hw_perf_event_destroy;
if (!hwc->sample_period) {
hwc->sample_period = MAX_PERIOD;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test; it will be performed at commit time
*/
static void sparc_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
cpuc = &__get_cpu_var(cpu_hw_events);
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
if (sparc_check_constraints(cpuc->event, cpuc->events, n))
return -EAGAIN;
cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
static struct pmu pmu = {
.pmu_enable = sparc_pmu_enable,
.pmu_disable = sparc_pmu_disable,
.event_init = sparc_pmu_event_init,
.add = sparc_pmu_add,
.del = sparc_pmu_del,
.start = sparc_pmu_start,
.stop = sparc_pmu_stop,
.read = sparc_pmu_read,
.start_txn = sparc_pmu_start_txn,
.cancel_txn = sparc_pmu_cancel_txn,
.commit_txn = sparc_pmu_commit_txn,
};
void perf_event_print_debug(void)
{
unsigned long flags;
u64 pcr, pic;
int cpu;
if (!sparc_pmu)
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = pcr_ops->read();
read_pic(pic);
pr_info("\n");
pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
cpu, pcr, pic);
local_irq_restore(flags);
}
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int i;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
default:
return NOTIFY_DONE;
}
regs = args->regs;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/* If the PMU has the TOE IRQ enable bits, we need to do a
* dummy write to the %pcr to clear the overflow bits and thus
* the interrupt.
*
* Do this before we peek at the counters to determine
* overflow so we don't lose any events.
*/
if (sparc_pmu->irq_bit)
pcr_ops->write(cpuc->pcr);
for (i = 0; i < cpuc->n_events; i++) {
struct perf_event *event = cpuc->event[i];
int idx = cpuc->current_idx[i];
struct hw_perf_event *hwc;
u64 val;
hwc = &event->hw;
val = sparc_perf_event_update(event, hwc, idx);
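/* Counters are programmed with -left (see
* sparc_perf_event_set_period); while bit 31 is still set the
* counter has not wrapped to zero, so this event did not overflow.
*/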
if (val & (1ULL << 31))
continue;
data.period = event->hw.last_period;
if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
sparc_pmu_stop(event, 0);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
if (!strcmp(sparc_pmu_type, "ultra3") ||
!strcmp(sparc_pmu_type, "ultra3+") ||
!strcmp(sparc_pmu_type, "ultra3i") ||
!strcmp(sparc_pmu_type, "ultra4+")) {
sparc_pmu = &ultra3_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara")) {
sparc_pmu = &niagara1_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "niagara2")) {
sparc_pmu = &niagara2_pmu;
return true;
}
return false;
}
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return 0;
}
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
register_die_notifier(&perf_event_nmi_notifier);
return 0;
}
early_initcall(init_hw_perf_events);
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int graph = 0;
#endif
stack_trace_flush();
perf_callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS;
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(current_thread_info(), fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(current_thread_info(), regs)) {
if (user_mode(regs))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
int index = current->curr_ret_stack;
if (current->ret_stack && index >= graph) {
pc = current->ret_stack[index - graph].ret;
perf_callchain_store(entry, pc);
graph++;
}
}
#endif
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ufp;
perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do {
struct sparc_stackf *usf, sf;
unsigned long pc;
usf = (struct sparc_stackf *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
static void perf_callchain_user_32(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long ufp;
perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
unsigned long pc;
usf = (struct sparc_stackf32 *) ufp;
if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
break;
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp;
perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH);
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
flushw_user();
if (test_thread_flag(TIF_32BIT))
perf_callchain_user_32(entry, regs);
else
perf_callchain_user_64(entry, regs);
}
|
116,594,665,145,327 | /*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
both, /* Swap, ldstub, etc. */
fpload,
fpstore,
invalid,
};
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if(!tmp)
return load;
else {
if(((insn>>19)&0x3f) == 15)
return both;
else
return store;
}
}
/* 8 = double-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
insn = (insn >> 19) & 3;
if(!insn)
return 4;
else if(insn == 3)
return 8;
else if(insn == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
return 4; /* just to keep gcc happy. */
}
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
return (insn & 0x400000);
}
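/* If any register involved lives in a register window (reg >= 16),
* spill the windows to the stack with nested save/restore so that
* fetch_reg()/fetch_reg_addr() can read the value from the saved
* window in memory.
*/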
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd)
{
if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
/* Wheee... */
__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"restore; restore; restore; restore;\n\t"
"restore; restore; restore;\n\t");
}
}
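/* Sign-extend the 13-bit immediate field: shift it up to the sign
* bit of a 32-bit int, then arithmetic-shift back down.
*/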
static inline int sign_extend_imm13(int imm)
{
return imm << 19 >> 19;
}
static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 *win;
if(reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
/* Ho hum, the slightly complicated case. */
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
return win->locals[reg - 16]; /* yes, I know what this does... */
}
static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 __user *win;
unsigned long ret;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
/* Ho hum, the slightly complicated case. */
win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
if ((unsigned long)win & 3)
return -1;
if (get_user(ret, &win->locals[reg - 16]))
return -1;
return ret;
}
static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 *win;
if(reg < 16)
return &regs->u_regs[reg];
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
return &win->locals[reg - 16];
}
static unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
unsigned int rd = (insn >> 25) & 0x1f;
if(insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
unsigned long safe_compute_effective_address(struct pt_regs *regs,
unsigned int insn)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
unsigned int rd = (insn >> 25) & 0x1f;
if(insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd);
return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd);
return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
}
}
/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
panic(str);
}
/* una_asm.S */
extern int do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed);
extern int __do_int_store(unsigned long *dst_addr, int size,
unsigned long *src_val);
static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
struct pt_regs *regs)
{
unsigned long zero[2] = { 0, 0 };
unsigned long *src_val;
if (reg_num)
src_val = fetch_reg_addr(reg_num, regs);
else {
src_val = &zero[0];
if (size == 8)
zero[1] = fetch_reg(1, regs);
}
return __do_int_store(dst_addr, size, src_val);
}
extern void smp_capture(void);
extern void smp_release(void);
static inline void advance(struct pt_regs *regs)
{
regs->pc = regs->npc;
regs->npc += 4;
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
return !floating_point_load_or_store_p(insn);
}
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
unsigned long g2 = regs->u_regs [UREG_G2];
unsigned long fixup = search_extables_range(regs->pc, &g2);
if (!fixup) {
unsigned long address = compute_effective_address(regs, insn);
if(address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
printk(KERN_ALERT " at virtual address %08lx\n",address);
printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
(current->mm ? current->mm->context :
current->active_mm->context));
printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
/* Not reached */
}
regs->pc = fixup;
regs->npc = regs->pc + 4;
regs->u_regs [UREG_G2] = g2;
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
if(!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
regs->pc);
unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
} else {
unsigned long addr = compute_effective_address(regs, insn);
int err;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (err)
kernel_mna_trap_fault(regs, insn);
else
advance(regs);
}
}
static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
enum direction dir)
{
unsigned int reg;
int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
if ((regs->pc | regs->npc) & 3)
return 0;
/* Must access_ok() in all the necessary places. */
#define WINREG_ADDR(regnum) \
((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
reg = (insn >> 25) & 0x1f;
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
reg = (insn >> 14) & 0x1f;
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
if (!(insn & 0x2000)) {
reg = (insn & 0x1f);
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
}
#undef WINREG_ADDR
return 0;
}
static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
info.si_trapno = 0;
send_sig_info(SIGBUS, &info, current);
}
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir;
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
dir = decode_direction(insn);
if(!ok_for_user(regs, insn, dir)) {
goto kill_user;
} else {
int err, size = decode_access_size(insn);
unsigned long addr;
if(floating_point_load_or_store_p(insn)) {
printk("User FPU load/store unaligned unsupported.\n");
goto kill_user;
}
addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
case both:
/*
* This was supported in 2.4. However, we question
* the value of SWAP instruction across word boundaries.
*/
printk("Unaligned SWAP unsupported.\n");
err = -EFAULT;
break;
default:
unaligned_panic("Impossible user unaligned trap.");
goto out;
}
if (err)
goto kill_user;
else
advance(regs);
goto out;
}
kill_user:
user_mna_trap_fault(regs, insn);
out:
;
}
| /*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
both, /* Swap, ldstub, etc. */
fpload,
fpstore,
invalid,
};
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if(!tmp)
return load;
else {
if(((insn>>19)&0x3f) == 15)
return both;
else
return store;
}
}
/* 8 = double-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
insn = (insn >> 19) & 3;
if(!insn)
return 4;
else if(insn == 3)
return 8;
else if(insn == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
return 4; /* just to keep gcc happy. */
}
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
return (insn & 0x400000);
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd)
{
if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
/* Wheee... */
__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"save %sp, -0x40, %sp\n\t"
"restore; restore; restore; restore;\n\t"
"restore; restore; restore;\n\t");
}
}
static inline int sign_extend_imm13(int imm)
{
return imm << 19 >> 19;
}
static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 *win;
if(reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
/* Ho hum, the slightly complicated case. */
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
return win->locals[reg - 16]; /* yes, I know what this does... */
}
static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 __user *win;
unsigned long ret;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
/* Ho hum, the slightly complicated case. */
win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
if ((unsigned long)win & 3)
return -1;
if (get_user(ret, &win->locals[reg - 16]))
return -1;
return ret;
}
static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
struct reg_window32 *win;
if(reg < 16)
return &regs->u_regs[reg];
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
return &win->locals[reg - 16];
}
static unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
unsigned int rd = (insn >> 25) & 0x1f;
if(insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
unsigned long safe_compute_effective_address(struct pt_regs *regs,
unsigned int insn)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
unsigned int rd = (insn >> 25) & 0x1f;
if(insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd);
return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd);
return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
}
}
/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
panic(str);
}
/* una_asm.S */
extern int do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed);
extern int __do_int_store(unsigned long *dst_addr, int size,
unsigned long *src_val);
static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
struct pt_regs *regs)
{
unsigned long zero[2] = { 0, 0 };
unsigned long *src_val;
if (reg_num)
src_val = fetch_reg_addr(reg_num, regs);
else {
src_val = &zero[0];
if (size == 8)
zero[1] = fetch_reg(1, regs);
}
return __do_int_store(dst_addr, size, src_val);
}
extern void smp_capture(void);
extern void smp_release(void);
static inline void advance(struct pt_regs *regs)
{
regs->pc = regs->npc;
regs->npc += 4;
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
return !floating_point_load_or_store_p(insn);
}
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
unsigned long g2 = regs->u_regs [UREG_G2];
unsigned long fixup = search_extables_range(regs->pc, &g2);
if (!fixup) {
unsigned long address = compute_effective_address(regs, insn);
if(address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
printk(KERN_ALERT " at virtual address %08lx\n",address);
printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
(current->mm ? current->mm->context :
current->active_mm->context));
printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
/* Not reached */
}
regs->pc = fixup;
regs->npc = regs->pc + 4;
regs->u_regs [UREG_G2] = g2;
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(insn);
if(!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
regs->pc);
unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
} else {
unsigned long addr = compute_effective_address(regs, insn);
int err;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (err)
kernel_mna_trap_fault(regs, insn);
else
advance(regs);
}
}
static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
enum direction dir)
{
unsigned int reg;
int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
if ((regs->pc | regs->npc) & 3)
return 0;
/* Must access_ok() in all the necessary places. */
#define WINREG_ADDR(regnum) \
((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
reg = (insn >> 25) & 0x1f;
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
reg = (insn >> 14) & 0x1f;
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
if (!(insn & 0x2000)) {
reg = (insn & 0x1f);
if (reg >= 16) {
if (!access_ok(check, WINREG_ADDR(reg - 16), size))
return -EFAULT;
}
}
#undef WINREG_ADDR
return 0;
}
static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
info.si_trapno = 0;
send_sig_info(SIGBUS, &info, current);
}
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir;
if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
(((insn >> 30) & 3) != 3))
goto kill_user;
dir = decode_direction(insn);
if(!ok_for_user(regs, insn, dir)) {
goto kill_user;
} else {
int err, size = decode_access_size(insn);
unsigned long addr;
if(floating_point_load_or_store_p(insn)) {
printk("User FPU load/store unaligned unsupported.\n");
goto kill_user;
}
addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
regs),
size, (unsigned long *) addr,
decode_signedness(insn));
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs);
break;
case both:
/*
* This was supported in 2.4. However, we question
* the value of SWAP instruction across word boundaries.
*/
printk("Unaligned SWAP unsupported.\n");
err = -EFAULT;
break;
default:
unaligned_panic("Impossible user unaligned trap.");
goto out;
}
if (err)
goto kill_user;
else
advance(regs);
goto out;
}
kill_user:
user_mna_trap_fault(regs, insn);
out:
;
}
|
132,049,581,851,025 | /*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <asm/fpumacro.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
both, /* Swap, ldstub, cas, ... */
fpld,
fpst,
invalid,
};
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if (!tmp)
return load;
else {
switch ((insn>>19)&0xf) {
case 15: /* swap* */
return both;
default:
return store;
}
}
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
tmp = ((insn >> 19) & 0xf);
if (tmp == 11 || tmp == 14) /* ldx/stx */
return 8;
tmp &= 3;
if (!tmp)
return 4;
else if (tmp == 3)
return 16; /* ldd/std - Although it is actually 8 */
else if (tmp == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
* die_if_kernel() is marked with attribute 'noreturn'.
* Alas, some versions do...
*/
return 0;
}
}
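/* For alternate-space accesses (bit 23 set), the ASI comes from the %asi
 * register held in TSTATE bits 31:24 when the 'i' bit is set, otherwise
 * from the imm_asi field in bits 12:5.  Everything else uses the primary
 * address space.
 */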
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
if (insn & 0x800000) {
if (insn & 0x2000)
return (unsigned char)(regs->tstate >> 24); /* %asi */
else
return (unsigned char)(insn >> 5); /* imm_asi */
} else
return ASI_P;
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
return (insn & 0x400000);
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static inline long sign_extend_imm13(long imm)
{
return imm << 51 >> 51;
}
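/* Globals and outs (registers 0-15) live in pt_regs; locals and ins must be
 * read from the register window saved on the stack: at %fp + STACK_BIAS for
 * privileged and 64-bit user frames, or via a 32-bit reg_window32 at the
 * zero-extended %fp for compat (TIF_32BIT) tasks.
 */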
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
if (reg < 16)
return &regs->u_regs[reg];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
}
}
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
die_if_kernel(str, regs);
}
extern int do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed, int asi);
extern int __do_int_store(unsigned long *dst_addr, int size,
unsigned long src_val, int asi);
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
unsigned long *src_val_p = &zero;
unsigned long src_val;
if (size == 16) {
size = 8;
zero = (((long)(reg_num ?
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
src_val_p = fetch_reg_addr(reg_num, regs);
}
src_val = *src_val_p;
if (unlikely(asi != orig_asi)) {
switch (size) {
case 2:
src_val = swab16(src_val);
break;
case 4:
src_val = swab32(src_val);
break;
case 8:
src_val = swab64(src_val);
break;
case 16:
default:
BUG();
break;
}
}
return __do_int_store(dst_addr, size, src_val, asi);
}
static inline void advance(struct pt_regs *regs)
{
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
return !floating_point_load_or_store_p(insn);
}
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
struct pt_regs *regs = current_thread_info()->kern_una_regs;
unsigned int insn = current_thread_info()->kern_una_insn;
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (!entry) {
unsigned long address;
address = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging "
"request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
/* Not reached */
}
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
if (fixup_tstate_asi) {
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= (ASI_AIUS << 24UL);
}
}
static void log_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
if (__ratelimit(&ratelimit)) {
printk("Kernel unaligned access at TPC[%lx] %pS\n",
regs->tpc, (void *) regs->tpc);
}
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
current_thread_info()->kern_una_insn = insn;
orig_asi = asi = decode_asi(insn, regs);
/* If this is a {get,put}_user() on an unaligned userspace pointer,
* just signal a fault and do not log the event.
*/
if (asi == ASI_AIUS) {
kernel_mna_trap_fault(0);
return;
}
log_unaligned(regs);
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel "
"at <%016lx>.\n", regs->tpc);
unaligned_panic("Kernel does fpu/atomic "
"unaligned load/store.", regs);
kernel_mna_trap_fault(0);
} else {
unsigned long addr, *reg_addr;
int err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
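/* Little-endian ASIs differ from their big-endian counterparts only in
 * bit 0x08; strip it and do the access big-endian, then byte-swap the
 * value below whenever asi != orig_asi.
 */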
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
case ASI_AIUSL:
case ASI_PL:
case ASI_SL:
case ASI_PNFL:
case ASI_SNFL:
asi &= ~0x08;
break;
}
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
err = do_int_load(reg_addr, size,
(unsigned long *) addr,
decode_signedness(insn), asi);
if (likely(!err) && unlikely(asi != orig_asi)) {
unsigned long val_in = *reg_addr;
switch (size) {
case 2:
val_in = swab16(val_in);
break;
case 4:
val_in = swab32(val_in);
break;
case 8:
val_in = swab64(val_in);
break;
case 16:
default:
BUG();
break;
}
*reg_addr = val_in;
}
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (unlikely(err))
kernel_mna_trap_fault(1);
else
advance(regs);
}
}
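/* handle_popc() emulates the POPC instruction: the 64-bit operand is
 * consumed four bits at a time and the per-nibble bit counts from
 * popc_helper[] are summed into rd.
 */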
static char popc_helper[] = {
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
};
int handle_popc(u32 insn, struct pt_regs *regs)
{
u64 value;
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
} else {
maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
value = fetch_reg(insn & 0x1f, regs);
}
for (ret = 0, i = 0; i < 16; i++) {
ret += popc_helper[value & 0xf];
value >>= 4;
}
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
} else {
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
advance(regs);
return 1;
}
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
if (asi < 0x80) {
do_privact(regs);
return 1;
}
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
{
/* Need to convert endians */
u64 tmp = __swab64p(&first);
first = __swab64p(&second);
second = tmp;
break;
}
default:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
/* LDF, LDDF, LDQF */
u32 data[4] __attribute__ ((aligned(8)));
int size, i;
int err;
if (asi < 0x80) {
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
case 0x000000: size = 1; break;
case 0x100000: size = 4; break;
default: size = 2; break;
}
for (i = 0; i < size; i++)
data[i] = 0;
err = get_user (data[0], (u32 __user *) addr);
if (!err) {
for (i = 1; i < size; i++)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
u64 tmp;
switch (size) {
case 1: data[0] = le32_to_cpup(data + 0); break;
default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
break;
case 4: tmp = le64_to_cpup((u64 *)(data + 0));
*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
*(u64 *)(data + 2) = tmp;
break;
}
}
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
}
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
}
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
u32 first, second;
int err;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
first = second = 0;
err = get_user(first, (u32 __user *)sfar);
if (!err)
err = get_user(second, (u32 __user *)(sfar + 4));
if (err) {
if (!(asi & 0x2))
goto daex;
first = second = 0;
}
save_and_clear_fpu();
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = (((u64)first) << 32) | second;
if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
}
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = 0;
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
save_and_clear_fpu();
if (current_thread_info()->fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
value = __swab64p(&value); break;
default: goto daex;
}
if (put_user (value >> 32, (u32 __user *) sfar) ||
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
}
| /*
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <asm/fpumacro.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
both, /* Swap, ldstub, cas, ... */
fpld,
fpst,
invalid,
};
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
if (!tmp)
return load;
else {
switch ((insn>>19)&0xf) {
case 15: /* swap* */
return both;
default:
return store;
}
}
}
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
unsigned int tmp;
tmp = ((insn >> 19) & 0xf);
if (tmp == 11 || tmp == 14) /* ldx/stx */
return 8;
tmp &= 3;
if (!tmp)
return 4;
else if (tmp == 3)
return 16; /* ldd/std - Although it is actually 8 */
else if (tmp == 2)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", regs);
/* GCC should never warn that control reaches the end
* of this function without returning a value because
* die_if_kernel() is marked with attribute 'noreturn'.
* Alas, some versions do...
*/
return 0;
}
}
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
if (insn & 0x800000) {
if (insn & 0x2000)
return (unsigned char)(regs->tstate >> 24); /* %asi */
else
return (unsigned char)(insn >> 5); /* imm_asi */
} else
return ASI_P;
}
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
return (insn & 0x400000);
}
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static inline long sign_extend_imm13(long imm)
{
return imm << 51 >> 51;
}
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
if (reg < 16)
return &regs->u_regs[reg];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long *)&win32->locals[reg - 16];
} else {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
}
}
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
}
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
die_if_kernel(str, regs);
}
extern int do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed, int asi);
extern int __do_int_store(unsigned long *dst_addr, int size,
unsigned long src_val, int asi);
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
unsigned long *src_val_p = &zero;
unsigned long src_val;
if (size == 16) {
size = 8;
zero = (((long)(reg_num ?
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
src_val_p = fetch_reg_addr(reg_num, regs);
}
src_val = *src_val_p;
if (unlikely(asi != orig_asi)) {
switch (size) {
case 2:
src_val = swab16(src_val);
break;
case 4:
src_val = swab32(src_val);
break;
case 8:
src_val = swab64(src_val);
break;
case 16:
default:
BUG();
break;
}
}
return __do_int_store(dst_addr, size, src_val, asi);
}
static inline void advance(struct pt_regs *regs)
{
regs->tpc = regs->tnpc;
regs->tnpc += 4;
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
}
static inline int floating_point_load_or_store_p(unsigned int insn)
{
return (insn >> 24) & 1;
}
static inline int ok_for_kernel(unsigned int insn)
{
return !floating_point_load_or_store_p(insn);
}
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
struct pt_regs *regs = current_thread_info()->kern_una_regs;
unsigned int insn = current_thread_info()->kern_una_insn;
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (!entry) {
unsigned long address;
address = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
if (address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference in mna handler");
} else
printk(KERN_ALERT "Unable to handle kernel paging "
"request in mna handler");
printk(KERN_ALERT " at virtual address %016lx\n",address);
printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
(current->mm ? CTX_HWBITS(current->mm->context) :
CTX_HWBITS(current->active_mm->context)));
printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
(current->mm ? (unsigned long) current->mm->pgd :
(unsigned long) current->active_mm->pgd));
die_if_kernel("Oops", regs);
/* Not reached */
}
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
if (fixup_tstate_asi) {
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= (ASI_AIUS << 24UL);
}
}
static void log_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
if (__ratelimit(&ratelimit)) {
printk("Kernel unaligned access at TPC[%lx] %pS\n",
regs->tpc, (void *) regs->tpc);
}
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
enum direction dir = decode_direction(insn);
int size = decode_access_size(regs, insn);
int orig_asi, asi;
current_thread_info()->kern_una_regs = regs;
current_thread_info()->kern_una_insn = insn;
orig_asi = asi = decode_asi(insn, regs);
/* If this is a {get,put}_user() on an unaligned userspace pointer,
* just signal a fault and do not log the event.
*/
if (asi == ASI_AIUS) {
kernel_mna_trap_fault(0);
return;
}
log_unaligned(regs);
if (!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel "
"at <%016lx>.\n", regs->tpc);
unaligned_panic("Kernel does fpu/atomic "
"unaligned load/store.", regs);
kernel_mna_trap_fault(0);
} else {
unsigned long addr, *reg_addr;
int err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
case ASI_AIUSL:
case ASI_PL:
case ASI_SL:
case ASI_PNFL:
case ASI_SNFL:
asi &= ~0x08;
break;
}
switch (dir) {
case load:
reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
err = do_int_load(reg_addr, size,
(unsigned long *) addr,
decode_signedness(insn), asi);
if (likely(!err) && unlikely(asi != orig_asi)) {
unsigned long val_in = *reg_addr;
switch (size) {
case 2:
val_in = swab16(val_in);
break;
case 4:
val_in = swab32(val_in);
break;
case 8:
val_in = swab64(val_in);
break;
case 16:
default:
BUG();
break;
}
*reg_addr = val_in;
}
break;
case store:
err = do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
if (unlikely(err))
kernel_mna_trap_fault(1);
else
advance(regs);
}
}
static char popc_helper[] = {
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
};
int handle_popc(u32 insn, struct pt_regs *regs)
{
u64 value;
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
} else {
maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
value = fetch_reg(insn & 0x1f, regs);
}
for (ret = 0, i = 0; i < 16; i++) {
ret += popc_helper[value & 0xf];
value >>= 4;
}
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
} else {
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
advance(regs);
return 1;
}
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
unsigned long addr = compute_effective_address(regs, insn, 0);
int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
struct fpustate *f = FPUSTATE;
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
if (insn & 0x200000) {
/* STQ */
u64 first = 0, second = 0;
if (current_thread_info()->fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
if (asi < 0x80) {
do_privact(regs);
return 1;
}
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
{
/* Need to convert endians */
u64 tmp = __swab64p(&first);
first = __swab64p(&second);
second = tmp;
break;
}
default:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
/* LDF, LDDF, LDQF */
u32 data[4] __attribute__ ((aligned(8)));
int size, i;
int err;
if (asi < 0x80) {
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
case 0x000000: size = 1; break;
case 0x100000: size = 4; break;
default: size = 2; break;
}
for (i = 0; i < size; i++)
data[i] = 0;
err = get_user (data[0], (u32 __user *) addr);
if (!err) {
for (i = 1; i < size; i++)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, addr, 0);
else
spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
u64 tmp;
switch (size) {
case 1: data[0] = le32_to_cpup(data + 0); break;
default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
break;
case 4: tmp = le64_to_cpup((u64 *)(data + 0));
*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
*(u64 *)(data + 2) = tmp;
break;
}
}
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current_thread_info()->fpsaved[0] |= flag;
}
advance(regs);
return 1;
}
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
int rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
reg[0] = 0;
if ((insn & 0x780000) == 0x180000)
reg[1] = 0;
} else if (test_thread_flag(TIF_32BIT)) {
put_user(0, (int __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, ((int __user *) reg) + 1);
} else {
put_user(0, (unsigned long __user *) reg);
if ((insn & 0x780000) == 0x180000)
put_user(0, (unsigned long __user *) reg + 1);
}
advance(regs);
}
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
u32 first, second;
int err;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
first = second = 0;
err = get_user(first, (u32 __user *)sfar);
if (!err)
err = get_user(second, (u32 __user *)(sfar + 4));
if (err) {
if (!(asi & 0x2))
goto daex;
first = second = 0;
}
save_and_clear_fpu();
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = (((u64)first) << 32) | second;
if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
}
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
u64 value;
u8 freg;
int flag;
struct fpustate *f = FPUSTATE;
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
value = 0;
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
save_and_clear_fpu();
if (current_thread_info()->fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
case ASI_S: break;
case ASI_PL:
case ASI_SL:
value = __swab64p(&value); break;
default: goto daex;
}
if (put_user (value >> 32, (u32 __user *) sfar) ||
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
daex:
if (tlb_type == hypervisor)
sun4v_data_access_exception(regs, sfar, sfsr);
else
spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
}
|
36,993,516,212,865 | /* visemul.c: Emulation of VIS instructions.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/system.h>
#include <asm/fpumacro.h>
#include <asm/uaccess.h>
/* OPF field of various VIS instructions. */
/* 000111011 - four 16-bit packs */
#define FPACK16_OPF 0x03b
/* 000111010 - two 32-bit packs */
#define FPACK32_OPF 0x03a
/* 000111101 - four 16-bit packs */
#define FPACKFIX_OPF 0x03d
/* 001001101 - four 16-bit expands */
#define FEXPAND_OPF 0x04d
/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF 0x04b
/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF 0x031
/* 000110011 - 8-by-16-bit upper alpha partitioned product */
#define FMUL8x16AU_OPF 0x033
/* 000110101 - 8-by-16-bit lower alpha partitioned product */
#define FMUL8x16AL_OPF 0x035
/* 000110110 - upper 8-by-16-bit partitioned product */
#define FMUL8SUx16_OPF 0x036
/* 000110111 - lower 8-by-16-bit partitioned product */
#define FMUL8ULx16_OPF 0x037
/* 000111000 - upper 8-by-16-bit partitioned product */
#define FMULD8SUx16_OPF 0x038
/* 000111001 - lower unsigned 8-by-16-bit partitioned product */
#define FMULD8ULx16_OPF 0x039
/* 000101000 - four 16-bit compare; set rd if src1 > src2 */
#define FCMPGT16_OPF 0x028
/* 000101100 - two 32-bit compare; set rd if src1 > src2 */
#define FCMPGT32_OPF 0x02c
/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */
#define FCMPLE16_OPF 0x020
/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */
#define FCMPLE32_OPF 0x024
/* 000100010 - four 16-bit compare; set rd if src1 != src2 */
#define FCMPNE16_OPF 0x022
/* 000100110 - two 32-bit compare; set rd if src1 != src2 */
#define FCMPNE32_OPF 0x026
/* 000101010 - four 16-bit compare; set rd if src1 == src2 */
#define FCMPEQ16_OPF 0x02a
/* 000101110 - two 32-bit compare; set rd if src1 == src2 */
#define FCMPEQ32_OPF 0x02e
/* 000000000 - Eight 8-bit edge boundary processing */
#define EDGE8_OPF 0x000
/* 000000001 - Eight 8-bit edge boundary processing, no CC */
#define EDGE8N_OPF 0x001
/* 000000010 - Eight 8-bit edge boundary processing, little-endian */
#define EDGE8L_OPF 0x002
/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */
#define EDGE8LN_OPF 0x003
/* 000000100 - Four 16-bit edge boundary processing */
#define EDGE16_OPF 0x004
/* 000000101 - Four 16-bit edge boundary processing, no CC */
#define EDGE16N_OPF 0x005
/* 000000110 - Four 16-bit edge boundary processing, little-endian */
#define EDGE16L_OPF 0x006
/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */
#define EDGE16LN_OPF 0x007
/* 000001000 - Two 32-bit edge boundary processing */
#define EDGE32_OPF 0x008
/* 000001001 - Two 32-bit edge boundary processing, no CC */
#define EDGE32N_OPF 0x009
/* 000001010 - Two 32-bit edge boundary processing, little-endian */
#define EDGE32L_OPF 0x00a
/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */
#define EDGE32LN_OPF 0x00b
/* 000111110 - distance between 8 8-bit components */
#define PDIST_OPF 0x03e
/* 000010000 - convert 8-bit 3-D address to blocked byte address */
#define ARRAY8_OPF 0x010
/* 000010010 - convert 16-bit 3-D address to blocked byte address */
#define ARRAY16_OPF 0x012
/* 000010100 - convert 32-bit 3-D address to blocked byte address */
#define ARRAY32_OPF 0x014
/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */
#define BMASK_OPF 0x019
/* 001001100 - Permute bytes as specified by GSR.MASK */
#define BSHUFFLE_OPF 0x04c
#define VIS_OPF_SHIFT 5
#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
#define RS1(INSN) (((INSN) >> 14) & 0x1f)
#define RS2(INSN) (((INSN) >> 0) & 0x1f)
#define RD(INSN) (((INSN) >> 25) & 0x1f)
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
struct pt_regs *regs)
{
BUG_ON(reg < 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long __user *)&win32->locals[reg - 16];
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
}
}
static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
struct pt_regs *regs)
{
BUG_ON(reg >= 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
return &regs->u_regs[reg];
}
static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
{
if (rd < 16) {
unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);
*rd_kern = val;
} else {
unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
if (test_thread_flag(TIF_32BIT))
__put_user((u32)val, (u32 __user *)rd_user);
else
__put_user(val, rd_user);
}
}
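/* V9 double-precision registers %f32-%f62 fold bit 5 of the register
 * number into bit 0 of the 5-bit field; undo that folding so the value
 * indexes the fpregs array correctly.
 */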
static inline unsigned long fpd_regval(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return *(unsigned long *) &f->regs[insn_regnum];
}
static inline unsigned long *fpd_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return (unsigned long *) &f->regs[insn_regnum];
}
static inline unsigned int fps_regval(struct fpustate *f,
unsigned int insn_regnum)
{
return f->regs[insn_regnum];
}
static inline unsigned int *fps_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
return &f->regs[insn_regnum];
}
struct edge_tab {
u16 left, right;
};
static struct edge_tab edge8_tab[8] = {
{ 0xff, 0x80 },
{ 0x7f, 0xc0 },
{ 0x3f, 0xe0 },
{ 0x1f, 0xf0 },
{ 0x0f, 0xf8 },
{ 0x07, 0xfc },
{ 0x03, 0xfe },
{ 0x01, 0xff },
};
static struct edge_tab edge8_tab_l[8] = {
{ 0xff, 0x01 },
{ 0xfe, 0x03 },
{ 0xfc, 0x07 },
{ 0xf8, 0x0f },
{ 0xf0, 0x1f },
{ 0xe0, 0x3f },
{ 0xc0, 0x7f },
{ 0x80, 0xff },
};
static struct edge_tab edge16_tab[4] = {
{ 0xf, 0x8 },
{ 0x7, 0xc },
{ 0x3, 0xe },
{ 0x1, 0xf },
};
static struct edge_tab edge16_tab_l[4] = {
{ 0xf, 0x1 },
{ 0xe, 0x3 },
{ 0xc, 0x7 },
{ 0x8, 0xf },
};
static struct edge_tab edge32_tab[2] = {
{ 0x3, 0x2 },
{ 0x1, 0x3 },
};
static struct edge_tab edge32_tab_l[2] = {
{ 0x3, 0x1 },
{ 0x2, 0x3 },
};
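/* The EDGE instructions produce the left/right byte-enable masks for the
 * partial words at the start and end of an 8/16/32-bit pixel run; the
 * non-N variants additionally set the condition codes from rs1 - rs2.
 */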
static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
u16 left, right;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);
if (test_thread_flag(TIF_32BIT)) {
rs1 = rs1 & 0xffffffff;
rs2 = rs2 & 0xffffffff;
}
switch (opf) {
default:
case EDGE8_OPF:
case EDGE8N_OPF:
left = edge8_tab[rs1 & 0x7].left;
right = edge8_tab[rs2 & 0x7].right;
break;
case EDGE8L_OPF:
case EDGE8LN_OPF:
left = edge8_tab_l[rs1 & 0x7].left;
right = edge8_tab_l[rs2 & 0x7].right;
break;
case EDGE16_OPF:
case EDGE16N_OPF:
left = edge16_tab[(rs1 >> 1) & 0x3].left;
right = edge16_tab[(rs2 >> 1) & 0x3].right;
break;
case EDGE16L_OPF:
case EDGE16LN_OPF:
left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
break;
case EDGE32_OPF:
case EDGE32N_OPF:
left = edge32_tab[(rs1 >> 2) & 0x1].left;
right = edge32_tab[(rs2 >> 2) & 0x1].right;
break;
case EDGE32L_OPF:
case EDGE32LN_OPF:
left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
break;
}
if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
rd_val = right & left;
else
rd_val = left;
store_reg(regs, rd_val, RD(insn));
switch (opf) {
case EDGE8_OPF:
case EDGE8L_OPF:
case EDGE16_OPF:
case EDGE16L_OPF:
case EDGE32_OPF:
case EDGE32L_OPF: {
unsigned long ccr, tstate;
__asm__ __volatile__("subcc %1, %2, %%g0\n\t"
"rd %%ccr, %0"
: "=r" (ccr)
: "r" (orig_rs1), "r" (orig_rs2)
: "cc");
tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
regs->tstate = tstate | (ccr << 32UL);
}
}
}
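/* ARRAY8/16/32 roughly convert a packed 3-D fixed-point coordinate in rs1
 * into a blocked (tiled) byte address by interleaving the low coordinate
 * bits; rs2 limits the number of upper bits used and the result is scaled
 * by the element size.
 */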
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long rs1, rs2, rd_val;
unsigned int bits, bits_mask;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
bits = (rs2 > 5 ? 5 : rs2);
bits_mask = (1UL << bits) - 1UL;
rd_val = ((((rs1 >> 11) & 0x3) << 0) |
(((rs1 >> 33) & 0x3) << 2) |
(((rs1 >> 55) & 0x1) << 4) |
(((rs1 >> 13) & 0xf) << 5) |
(((rs1 >> 35) & 0xf) << 9) |
(((rs1 >> 56) & 0xf) << 13) |
(((rs1 >> 17) & bits_mask) << 17) |
(((rs1 >> 39) & bits_mask) << (17 + bits)) |
(((rs1 >> 60) & 0xf) << (17 + (2*bits))));
switch (opf) {
case ARRAY16_OPF:
rd_val <<= 1;
break;
case ARRAY32_OPF:
rd_val <<= 2;
}
store_reg(regs, rd_val, RD(insn));
}
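/* BMASK writes rs1 + rs2 both to rd and into the upper 32 bits of the GSR,
 * where it acts as the byte-selection mask for a following BSHUFFLE.
 */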
static void bmask(struct pt_regs *regs, unsigned int insn)
{
unsigned long rs1, rs2, rd_val, gsr;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
rd_val = rs1 + rs2;
store_reg(regs, rd_val, RD(insn));
gsr = current_thread_info()->gsr[0] & 0xffffffff;
gsr |= rd_val << 32UL;
current_thread_info()->gsr[0] = gsr;
}
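/* Each 4-bit field of GSR.mask selects one byte of the rs1/rs2 pair
 * (0-7 from rs1, 8-15 from rs2) and places it in the corresponding byte
 * of the result.
 */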
static void bshuffle(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
unsigned long bmask, i;
bmask = current_thread_info()->gsr[0] >> 32UL;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0UL;
for (i = 0; i < 8; i++) {
unsigned long which = (bmask >> (i * 4)) & 0xf;
unsigned long byte;
if (which < 8)
byte = (rs1 >> (which * 8)) & 0xff;
else
byte = (rs2 >> ((which-8)*8)) & 0xff;
rd_val |= (byte << (i * 8));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
}
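/* PDIST sums the absolute differences of the eight corresponding bytes of
 * rs1 and rs2 and accumulates the total into rd.
 */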
static void pdist(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, *rd, rd_val;
unsigned long i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd = fpd_regaddr(f, RD(insn));
rd_val = *rd;
for (i = 0; i < 8; i++) {
s16 s1, s2;
s1 = (rs1 >> (56 - (i * 8))) & 0xff;
s2 = (rs2 >> (56 - (i * 8))) & 0xff;
/* Absolute value of difference. */
s1 -= s2;
if (s1 < 0)
s1 = ~s1 + 1;
rd_val += s1;
}
*rd = rd_val;
}
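/* FPACK16, FPACK32 and FPACKFIX scale their fixed-point inputs by
 * GSR.scale and clamp to the destination range; FEXPAND widens each byte
 * to a 16-bit fixed-point value and FPMERGE interleaves the bytes of its
 * two 32-bit inputs.
 */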
static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, gsr, scale, rd_val;
gsr = current_thread_info()->gsr[0];
scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);
switch (opf) {
case FPACK16_OPF: {
unsigned long byte;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned int val;
s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL;
int scaled = src << scale;
int from_fixed = scaled >> 7;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (8 * byte));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACK32_OPF: {
unsigned long word;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL);
for (word = 0; word < 2; word++) {
unsigned long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 23;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (32 * word));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACKFIX_OPF: {
unsigned long word;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (word = 0; word < 2; word++) {
long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 16;
val = ((from_fixed < -32768) ?
-32768 :
(from_fixed > 32767) ?
32767 : from_fixed);
rd_val |= ((val & 0xffff) << (word * 16));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FEXPAND_OPF: {
unsigned long byte;
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned long val;
u8 src = (rs2 >> (byte * 8)) & 0xff;
val = src << 4;
rd_val |= (val << (byte * 16));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPMERGE_OPF: {
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = (((rs2 & 0x000000ff) << 0) |
((rs1 & 0x000000ff) << 8) |
((rs2 & 0x0000ff00) << 8) |
((rs1 & 0x0000ff00) << 16) |
((rs2 & 0x00ff0000) << 16) |
((rs1 & 0x00ff0000) << 24) |
((rs2 & 0xff000000) << 24) |
((rs1 & 0xff000000) << 32));
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
switch (opf) {
case FMUL8x16_OPF: {
unsigned long byte;
rs1 = fps_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
s16 src2 = (rs2 >> (byte * 16)) & 0xffff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF: {
unsigned long byte;
s16 src2;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 4; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 2; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) <<
((byte * 32UL) + 7UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val, i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
switch (opf) {
case FCMPGT16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a > b)
rd_val |= 1 << i;
}
break;
case FCMPGT32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a > b)
rd_val |= 1 << i;
}
break;
case FCMPLE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a <= b)
rd_val |= 1 << i;
}
break;
case FCMPLE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a <= b)
rd_val |= 1 << i;
}
break;
case FCMPNE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a != b)
rd_val |= 1 << i;
}
break;
case FCMPNE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a != b)
rd_val |= 1 << i;
}
break;
case FCMPEQ16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a == b)
rd_val |= 1 << i;
}
break;
case FCMPEQ32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a == b)
rd_val |= 1 << i;
}
break;
}
maybe_flush_windows(0, 0, RD(insn), 0);
store_reg(regs, rd_val, RD(insn));
}
/* Emulate the VIS instructions which are not implemented in
* hardware on Niagara.
*/
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
unsigned long pc = regs->tpc;
unsigned int opf;
BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:
return -EINVAL;
/* Pixel Formatting Instructions. */
case FPACK16_OPF:
case FPACK32_OPF:
case FPACKFIX_OPF:
case FEXPAND_OPF:
case FPMERGE_OPF:
pformat(regs, insn, opf);
break;
/* Partitioned Multiply Instructions */
case FMUL8x16_OPF:
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF:
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF:
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF:
pmul(regs, insn, opf);
break;
/* Pixel Compare Instructions */
case FCMPGT16_OPF:
case FCMPGT32_OPF:
case FCMPLE16_OPF:
case FCMPLE32_OPF:
case FCMPNE16_OPF:
case FCMPNE32_OPF:
case FCMPEQ16_OPF:
case FCMPEQ32_OPF:
pcmp(regs, insn, opf);
break;
/* Edge Handling Instructions */
case EDGE8_OPF:
case EDGE8N_OPF:
case EDGE8L_OPF:
case EDGE8LN_OPF:
case EDGE16_OPF:
case EDGE16N_OPF:
case EDGE16L_OPF:
case EDGE16LN_OPF:
case EDGE32_OPF:
case EDGE32N_OPF:
case EDGE32L_OPF:
case EDGE32LN_OPF:
edge(regs, insn, opf);
break;
/* Pixel Component Distance */
case PDIST_OPF:
pdist(regs, insn);
break;
/* Three-Dimensional Array Addressing Instructions */
case ARRAY8_OPF:
case ARRAY16_OPF:
case ARRAY32_OPF:
array(regs, insn, opf);
break;
/* Byte Mask and Shuffle Instructions */
case BMASK_OPF:
bmask(regs, insn);
break;
case BSHUFFLE_OPF:
bshuffle(regs, insn);
break;
}
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 0;
}
| /* visemul.c: Emulation of VIS instructions.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/system.h>
#include <asm/fpumacro.h>
#include <asm/uaccess.h>
/* OPF field of various VIS instructions. */
/* 000111011 - four 16-bit packs */
#define FPACK16_OPF 0x03b
/* 000111010 - two 32-bit packs */
#define FPACK32_OPF 0x03a
/* 000111101 - four 16-bit packs */
#define FPACKFIX_OPF 0x03d
/* 001001101 - four 16-bit expands */
#define FEXPAND_OPF 0x04d
/* 001001011 - two 32-bit merges */
#define FPMERGE_OPF 0x04b
/* 000110001 - 8-by-16-bit partitioned product */
#define FMUL8x16_OPF 0x031
/* 000110011 - 8-by-16-bit upper alpha partitioned product */
#define FMUL8x16AU_OPF 0x033
/* 000110101 - 8-by-16-bit lower alpha partitioned product */
#define FMUL8x16AL_OPF 0x035
/* 000110110 - upper 8-by-16-bit partitioned product */
#define FMUL8SUx16_OPF 0x036
/* 000110111 - lower 8-by-16-bit partitioned product */
#define FMUL8ULx16_OPF 0x037
/* 000111000 - upper 8-by-16-bit partitioned product */
#define FMULD8SUx16_OPF 0x038
/* 000111001 - lower unsigned 8-by-16-bit partitioned product */
#define FMULD8ULx16_OPF 0x039
/* 000101000 - four 16-bit compare; set rd if src1 > src2 */
#define FCMPGT16_OPF 0x028
/* 000101100 - two 32-bit compare; set rd if src1 > src2 */
#define FCMPGT32_OPF 0x02c
/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */
#define FCMPLE16_OPF 0x020
/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */
#define FCMPLE32_OPF 0x024
/* 000100010 - four 16-bit compare; set rd if src1 != src2 */
#define FCMPNE16_OPF 0x022
/* 000100110 - two 32-bit compare; set rd if src1 != src2 */
#define FCMPNE32_OPF 0x026
/* 000101010 - four 16-bit compare; set rd if src1 == src2 */
#define FCMPEQ16_OPF 0x02a
/* 000101110 - two 32-bit compare; set rd if src1 == src2 */
#define FCMPEQ32_OPF 0x02e
/* 000000000 - Eight 8-bit edge boundary processing */
#define EDGE8_OPF 0x000
/* 000000001 - Eight 8-bit edge boundary processing, no CC */
#define EDGE8N_OPF 0x001
/* 000000010 - Eight 8-bit edge boundary processing, little-endian */
#define EDGE8L_OPF 0x002
/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */
#define EDGE8LN_OPF 0x003
/* 000000100 - Four 16-bit edge boundary processing */
#define EDGE16_OPF 0x004
/* 000000101 - Four 16-bit edge boundary processing, no CC */
#define EDGE16N_OPF 0x005
/* 000000110 - Four 16-bit edge boundary processing, little-endian */
#define EDGE16L_OPF 0x006
/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */
#define EDGE16LN_OPF 0x007
/* 000001000 - Two 32-bit edge boundary processing */
#define EDGE32_OPF 0x008
/* 000001001 - Two 32-bit edge boundary processing, no CC */
#define EDGE32N_OPF 0x009
/* 000001010 - Two 32-bit edge boundary processing, little-endian */
#define EDGE32L_OPF 0x00a
/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */
#define EDGE32LN_OPF 0x00b
/* 000111110 - distance between 8 8-bit components */
#define PDIST_OPF 0x03e
/* 000010000 - convert 8-bit 3-D address to blocked byte address */
#define ARRAY8_OPF 0x010
/* 000010010 - convert 16-bit 3-D address to blocked byte address */
#define ARRAY16_OPF 0x012
/* 000010100 - convert 32-bit 3-D address to blocked byte address */
#define ARRAY32_OPF 0x014
/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */
#define BMASK_OPF 0x019
/* 001001100 - Permute bytes as specified by GSR.MASK */
#define BSHUFFLE_OPF 0x04c
#define VIS_OPF_SHIFT 5
#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT)
#define RS1(INSN) (((INSN) >> 14) & 0x1f)
#define RS2(INSN) (((INSN) >> 0) & 0x1f)
#define RD(INSN) (((INSN) >> 25) & 0x1f)
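/* Field layout implied by the macros above: opf occupies bits 13:5 of
 * the instruction word, rs1 bits 18:14, rs2 bits 4:0, and rd bits 29:25.
 */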
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
unsigned int rd, int from_kernel)
{
if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
if (from_kernel != 0)
__asm__ __volatile__("flushw");
else
flushw_user();
}
}
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
unsigned long value;
if (reg < 16)
return (!reg ? 0 : regs->u_regs[reg]);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(value, &win->locals[reg - 16]);
}
return value;
}
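/* Registers 0-15 (%g0-%g7, %o0-%o7) live in pt_regs->u_regs; registers
 * 16-31 (%l0-%l7, %i0-%i7) sit in the register window saved on the
 * stack, which is why the window may need flushing first and why the
 * user case reads the saved copy back with get_user().
 */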
static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg,
struct pt_regs *regs)
{
BUG_ON(reg < 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long __user *)&win32->locals[reg - 16];
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
}
}
static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg,
struct pt_regs *regs)
{
BUG_ON(reg >= 16);
BUG_ON(regs->tstate & TSTATE_PRIV);
return &regs->u_regs[reg];
}
static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd)
{
if (rd < 16) {
unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs);
*rd_kern = val;
} else {
unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs);
if (test_thread_flag(TIF_32BIT))
__put_user((u32)val, (u32 __user *)rd_user);
else
__put_user(val, rd_user);
}
}
static inline unsigned long fpd_regval(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return *(unsigned long *) &f->regs[insn_regnum];
}
static inline unsigned long *fpd_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
insn_regnum = (((insn_regnum & 1) << 5) |
(insn_regnum & 0x1e));
return (unsigned long *) &f->regs[insn_regnum];
}
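/* Example of the renumbering above: a double register such as %f32 is
 * encoded as 0x01 (low bit set), and ((1 & 1) << 5) | (1 & 0x1e)
 * yields 32, the matching index into f->regs[].
 */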
static inline unsigned int fps_regval(struct fpustate *f,
unsigned int insn_regnum)
{
return f->regs[insn_regnum];
}
static inline unsigned int *fps_regaddr(struct fpustate *f,
unsigned int insn_regnum)
{
return &f->regs[insn_regnum];
}
struct edge_tab {
u16 left, right;
};
static struct edge_tab edge8_tab[8] = {
{ 0xff, 0x80 },
{ 0x7f, 0xc0 },
{ 0x3f, 0xe0 },
{ 0x1f, 0xf0 },
{ 0x0f, 0xf8 },
{ 0x07, 0xfc },
{ 0x03, 0xfe },
{ 0x01, 0xff },
};
static struct edge_tab edge8_tab_l[8] = {
{ 0xff, 0x01 },
{ 0xfe, 0x03 },
{ 0xfc, 0x07 },
{ 0xf8, 0x0f },
{ 0xf0, 0x1f },
{ 0xe0, 0x3f },
{ 0xc0, 0x7f },
{ 0x80, 0xff },
};
static struct edge_tab edge16_tab[4] = {
{ 0xf, 0x8 },
{ 0x7, 0xc },
{ 0x3, 0xe },
{ 0x1, 0xf },
};
static struct edge_tab edge16_tab_l[4] = {
{ 0xf, 0x1 },
{ 0xe, 0x3 },
{ 0xc, 0x7 },
{ 0x8, 0xf },
};
static struct edge_tab edge32_tab[2] = {
{ 0x3, 0x2 },
{ 0x1, 0x3 },
};
static struct edge_tab edge32_tab_l[2] = {
{ 0x3, 0x1 },
{ 0x2, 0x3 },
};
static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val;
u16 left, right;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
orig_rs1 = rs1 = fetch_reg(RS1(insn), regs);
orig_rs2 = rs2 = fetch_reg(RS2(insn), regs);
if (test_thread_flag(TIF_32BIT)) {
rs1 = rs1 & 0xffffffff;
rs2 = rs2 & 0xffffffff;
}
switch (opf) {
default:
case EDGE8_OPF:
case EDGE8N_OPF:
left = edge8_tab[rs1 & 0x7].left;
right = edge8_tab[rs2 & 0x7].right;
break;
case EDGE8L_OPF:
case EDGE8LN_OPF:
left = edge8_tab_l[rs1 & 0x7].left;
right = edge8_tab_l[rs2 & 0x7].right;
break;
case EDGE16_OPF:
case EDGE16N_OPF:
left = edge16_tab[(rs1 >> 1) & 0x3].left;
right = edge16_tab[(rs2 >> 1) & 0x3].right;
break;
case EDGE16L_OPF:
case EDGE16LN_OPF:
left = edge16_tab_l[(rs1 >> 1) & 0x3].left;
right = edge16_tab_l[(rs2 >> 1) & 0x3].right;
break;
case EDGE32_OPF:
case EDGE32N_OPF:
left = edge32_tab[(rs1 >> 2) & 0x1].left;
right = edge32_tab[(rs2 >> 2) & 0x1].right;
break;
case EDGE32L_OPF:
case EDGE32LN_OPF:
left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
break;
}
if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
rd_val = right & left;
else
rd_val = left;
store_reg(regs, rd_val, RD(insn));
switch (opf) {
case EDGE8_OPF:
case EDGE8L_OPF:
case EDGE16_OPF:
case EDGE16L_OPF:
case EDGE32_OPF:
case EDGE32L_OPF: {
unsigned long ccr, tstate;
__asm__ __volatile__("subcc %1, %2, %%g0\n\t"
"rd %%ccr, %0"
: "=r" (ccr)
: "r" (orig_rs1), "r" (orig_rs2)
: "cc");
tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
regs->tstate = tstate | (ccr << 32UL);
}
}
}
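/* Worked edge8 example: rs1 == 0x03 and rs2 == 0x0a give
 * left == edge8_tab[3].left == 0x1f and right == edge8_tab[2].right
 * == 0xfe; since the addresses differ above the low three bits,
 * rd receives only the left mask, 0x1f.
 */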
static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
unsigned long rs1, rs2, rd_val;
unsigned int bits, bits_mask;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
bits = (rs2 > 5 ? 5 : rs2);
bits_mask = (1UL << bits) - 1UL;
rd_val = ((((rs1 >> 11) & 0x3) << 0) |
(((rs1 >> 33) & 0x3) << 2) |
(((rs1 >> 55) & 0x1) << 4) |
(((rs1 >> 13) & 0xf) << 5) |
(((rs1 >> 35) & 0xf) << 9) |
(((rs1 >> 56) & 0xf) << 13) |
(((rs1 >> 17) & bits_mask) << 17) |
(((rs1 >> 39) & bits_mask) << (17 + bits)) |
(((rs1 >> 60) & 0xf) << (17 + (2*bits))));
switch (opf) {
case ARRAY16_OPF:
rd_val <<= 1;
break;
case ARRAY32_OPF:
rd_val <<= 2;
}
store_reg(regs, rd_val, RD(insn));
}
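/* The shifts above gather the integer parts of the fixed-point
 * coordinates packed in rs1 (roughly: X from bit 11, Y from bit 33,
 * Z from bit 55) into a blocked byte offset; rs2, clamped to 5,
 * controls how many X/Y bits participate, and ARRAY16/ARRAY32 scale
 * the result by the element size.
 */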
static void bmask(struct pt_regs *regs, unsigned int insn)
{
unsigned long rs1, rs2, rd_val, gsr;
maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0);
rs1 = fetch_reg(RS1(insn), regs);
rs2 = fetch_reg(RS2(insn), regs);
rd_val = rs1 + rs2;
store_reg(regs, rd_val, RD(insn));
gsr = current_thread_info()->gsr[0] & 0xffffffff;
gsr |= rd_val << 32UL;
current_thread_info()->gsr[0] = gsr;
}
static void bshuffle(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
unsigned long bmask, i;
bmask = current_thread_info()->gsr[0] >> 32UL;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0UL;
for (i = 0; i < 8; i++) {
unsigned long which = (bmask >> (i * 4)) & 0xf;
unsigned long byte;
if (which < 8)
byte = (rs1 >> (which * 8)) & 0xff;
else
byte = (rs2 >> ((which-8)*8)) & 0xff;
rd_val |= (byte << (i * 8));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
}
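/* In the loop above, nibble i of GSR.mask selects result byte i:
 * values 0-7 pick that byte of rs1, 8-15 the corresponding byte of
 * rs2. A mask of 0x01234567 therefore byte-reverses rs1.
 */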
static void pdist(struct pt_regs *regs, unsigned int insn)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, *rd, rd_val;
unsigned long i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd = fpd_regaddr(f, RD(insn));
rd_val = *rd;
for (i = 0; i < 8; i++) {
s16 s1, s2;
s1 = (rs1 >> (56 - (i * 8))) & 0xff;
s2 = (rs2 >> (56 - (i * 8))) & 0xff;
/* Absolute value of difference. */
s1 -= s2;
if (s1 < 0)
s1 = ~s1 + 1;
rd_val += s1;
}
*rd = rd_val;
}
static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, gsr, scale, rd_val;
gsr = current_thread_info()->gsr[0];
scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);
switch (opf) {
case FPACK16_OPF: {
unsigned long byte;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned int val;
s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL;
int scaled = src << scale;
int from_fixed = scaled >> 7;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (8 * byte));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACK32_OPF: {
unsigned long word;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL);
for (word = 0; word < 2; word++) {
unsigned long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 23;
val = ((from_fixed < 0) ?
0 :
(from_fixed > 255) ?
255 : from_fixed);
rd_val |= (val << (32 * word));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPACKFIX_OPF: {
unsigned long word;
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (word = 0; word < 2; word++) {
long val;
s32 src = (rs2 >> (word * 32UL));
s64 scaled = src << scale;
s64 from_fixed = scaled >> 16;
val = ((from_fixed < -32768) ?
-32768 :
(from_fixed > 32767) ?
32767 : from_fixed);
rd_val |= ((val & 0xffff) << (word * 16));
}
*fps_regaddr(f, RD(insn)) = rd_val;
break;
}
case FEXPAND_OPF: {
unsigned long byte;
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
unsigned long val;
u8 src = (rs2 >> (byte * 8)) & 0xff;
val = src << 4;
rd_val |= (val << (byte * 16));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FPMERGE_OPF: {
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = (((rs2 & 0x000000ff) << 0) |
((rs1 & 0x000000ff) << 8) |
((rs2 & 0x0000ff00) << 8) |
((rs1 & 0x0000ff00) << 16) |
((rs2 & 0x00ff0000) << 16) |
((rs1 & 0x00ff0000) << 24) |
((rs2 & 0xff000000) << 24) |
((rs1 & 0xff000000) << 32));
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
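/* FPACK16 example with GSR.scale == 7: a source lane of 0x0100 scales
 * to 0x8000, the arithmetic shift right by 7 gives 0x100, and the
 * result saturates to the maximum byte value, 255.
 */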
static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val;
switch (opf) {
case FMUL8x16_OPF: {
unsigned long byte;
rs1 = fps_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
s16 src2 = (rs2 >> (byte * 16)) & 0xffff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF: {
unsigned long byte;
s16 src2;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
u32 prod = src1 * src2;
u16 scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 4; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) << (byte * 16UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF: {
unsigned long byte, ushift;
rs1 = fps_regval(f, RS1(insn));
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0;
for (byte = 0; byte < 2; byte++) {
u16 src1;
s16 src2;
u32 prod;
u16 scaled;
src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff);
src2 = ((rs2 >> (16 * byte)) & 0xffff);
prod = src1 * src2;
scaled = ((prod & 0x00ffff00) >> 8);
/* Round up. */
if (prod & 0x80)
scaled++;
rd_val |= ((scaled & 0xffffUL) <<
((byte * 32UL) + 7UL));
}
*fpd_regaddr(f, RD(insn)) = rd_val;
break;
}
}
}
static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
{
struct fpustate *f = FPUSTATE;
unsigned long rs1, rs2, rd_val, i;
rs1 = fpd_regval(f, RS1(insn));
rs2 = fpd_regval(f, RS2(insn));
rd_val = 0;
switch (opf) {
case FCMPGT16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a > b)
rd_val |= 1 << i;
}
break;
case FCMPGT32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a > b)
rd_val |= 1 << i;
}
break;
case FCMPLE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a <= b)
rd_val |= 1 << i;
}
break;
case FCMPLE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a <= b)
rd_val |= 1 << i;
}
break;
case FCMPNE16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a != b)
rd_val |= 1 << i;
}
break;
case FCMPNE32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a != b)
rd_val |= 1 << i;
}
break;
case FCMPEQ16_OPF:
for (i = 0; i < 4; i++) {
s16 a = (rs1 >> (i * 16)) & 0xffff;
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a == b)
rd_val |= 1 << i;
}
break;
case FCMPEQ32_OPF:
for (i = 0; i < 2; i++) {
s32 a = (rs1 >> (i * 32)) & 0xffffffff;
s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a == b)
rd_val |= 1 << i;
}
break;
}
maybe_flush_windows(0, 0, RD(insn), 0);
store_reg(regs, rd_val, RD(insn));
}
/* Emulate the VIS instructions which are not implemented in
* hardware on Niagara.
*/
int vis_emul(struct pt_regs *regs, unsigned int insn)
{
unsigned long pc = regs->tpc;
unsigned int opf;
BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc))
return -EFAULT;
save_and_clear_fpu();
opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT;
switch (opf) {
default:
return -EINVAL;
/* Pixel Formatting Instructions. */
case FPACK16_OPF:
case FPACK32_OPF:
case FPACKFIX_OPF:
case FEXPAND_OPF:
case FPMERGE_OPF:
pformat(regs, insn, opf);
break;
/* Partitioned Multiply Instructions */
case FMUL8x16_OPF:
case FMUL8x16AU_OPF:
case FMUL8x16AL_OPF:
case FMUL8SUx16_OPF:
case FMUL8ULx16_OPF:
case FMULD8SUx16_OPF:
case FMULD8ULx16_OPF:
pmul(regs, insn, opf);
break;
/* Pixel Compare Instructions */
case FCMPGT16_OPF:
case FCMPGT32_OPF:
case FCMPLE16_OPF:
case FCMPLE32_OPF:
case FCMPNE16_OPF:
case FCMPNE32_OPF:
case FCMPEQ16_OPF:
case FCMPEQ32_OPF:
pcmp(regs, insn, opf);
break;
/* Edge Handling Instructions */
case EDGE8_OPF:
case EDGE8N_OPF:
case EDGE8L_OPF:
case EDGE8LN_OPF:
case EDGE16_OPF:
case EDGE16N_OPF:
case EDGE16L_OPF:
case EDGE16LN_OPF:
case EDGE32_OPF:
case EDGE32N_OPF:
case EDGE32L_OPF:
case EDGE32LN_OPF:
edge(regs, insn, opf);
break;
/* Pixel Component Distance */
case PDIST_OPF:
pdist(regs, insn);
break;
/* Three-Dimensional Array Addressing Instructions */
case ARRAY8_OPF:
case ARRAY16_OPF:
case ARRAY32_OPF:
array(regs, insn, opf);
break;
/* Byte Mask and Shuffle Instructions */
case BMASK_OPF:
bmask(regs, insn);
break;
case BSHUFFLE_OPF:
bshuffle(regs, insn);
break;
}
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 0;
}
|
39,372,067,583,112 | /*
* arch/sparc/math-emu/math.c
*
* Copyright (C) 1998 Peter Maydell (pmaydell@chiark.greenend.org.uk)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* This is a good place to start if you're trying to understand the
* emulation code, because it's pretty simple. What we do is
* essentially analyse the instruction to work out what the operation
* is and which registers are involved. We then execute the appropriate
* FXXXX function. [The floating point queue introduces a minor wrinkle;
* see below...]
* The fxxxxx.c files each emulate a single insn. They look relatively
* simple because the complexity is hidden away in an unholy tangle
* of preprocessor macros.
*
* The first layer of macros is single.h, double.h, quad.h. Generally
* these files define macros for working with floating point numbers
* of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles,
* for instance. These macros are usually defined as calls to more
* generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number
* of machine words required to store the given IEEE format is passed
* as a parameter. [double.h and co check the number of bits in a word
* and define FP_ADD_D & co appropriately].
* The generic macros are defined in op-common.h. This is where all
* the grotty stuff like handling NaNs is coded. To handle the possible
* word sizes macros in op-common.h use macros like _FP_FRAC_SLL_##wc()
* where wc is the 'number of machine words' parameter (here 2).
* These are defined in the third layer of macros: op-1.h, op-2.h
* and op-4.h. These handle operations on floating point numbers composed
* of 1,2 and 4 machine words respectively. [For example, on sparc64
* doubles are one machine word so macros in double.h eventually use
* constructs in op-1.h, but on sparc32 they use op-2.h definitions.]
* soft-fp.h is on the same level as op-common.h, and defines some
* macros which are independent of both word size and FP format.
* Finally, sfp-machine.h is the machine dependent part of the
* code: it defines the word size and what type a word is. It also
* defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h
* provide several possible flavours of multiply algorithm, most
* of which require that you supply some form of asm or C primitive to
* do the actual multiply. (such asm primitives should be defined
* in sfp-machine.h too). udivmodti4.c is the same sort of thing.
*
* There may be some errors here because I'm working from a
* SPARC architecture manual V9, and what I really want is V8...
* Also, the insns which can generate exceptions seem to be a
* greater subset of the FPops than for V9 (for example, FCMPED
* has to be emulated on V8). So I think I'm going to have
* to emulate them all just to be on the safe side...
*
* Emulation routines originate from soft-fp package, which is
* part of glibc and has appropriate copyrights in it (allegedly).
*
* NB: on sparc int == long == 4 bytes, long long == 8 bytes.
* Most bits of the kernel seem to go for long rather than int,
* so we follow that practice...
*/
/* TODO:
* fpsave() saves the FP queue but fpload() doesn't reload it.
* Therefore when we context switch or change FPU ownership
* we have to check to see if the queue had anything in it and
* emulate it if it did. This is going to be a pain.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
#define FLOATFUNC(x) extern int x(void *,void *,void *)
/* The Vn labels indicate what version of the SPARC architecture gas thinks
* each insn is. This is from the binutils source :->
*/
/* quadword instructions */
#define FSQRTQ 0x02b /* v8 */
#define FADDQ 0x043 /* v8 */
#define FSUBQ 0x047 /* v8 */
#define FMULQ 0x04b /* v8 */
#define FDIVQ 0x04f /* v8 */
#define FDMULQ 0x06e /* v8 */
#define FQTOS 0x0c7 /* v8 */
#define FQTOD 0x0cb /* v8 */
#define FITOQ 0x0cc /* v8 */
#define FSTOQ 0x0cd /* v8 */
#define FDTOQ 0x0ce /* v8 */
#define FQTOI 0x0d3 /* v8 */
#define FCMPQ 0x053 /* v8 */
#define FCMPEQ 0x057 /* v8 */
/* single/double instructions (subnormal): should all work */
#define FSQRTS 0x029 /* v7 */
#define FSQRTD 0x02a /* v7 */
#define FADDS 0x041 /* v6 */
#define FADDD 0x042 /* v6 */
#define FSUBS 0x045 /* v6 */
#define FSUBD 0x046 /* v6 */
#define FMULS 0x049 /* v6 */
#define FMULD 0x04a /* v6 */
#define FDIVS 0x04d /* v6 */
#define FDIVD 0x04e /* v6 */
#define FSMULD 0x069 /* v6 */
#define FDTOS 0x0c6 /* v6 */
#define FSTOD 0x0c9 /* v6 */
#define FSTOI 0x0d1 /* v6 */
#define FDTOI 0x0d2 /* v6 */
#define FABSS 0x009 /* v6 */
#define FCMPS 0x051 /* v6 */
#define FCMPES 0x055 /* v6 */
#define FCMPD 0x052 /* v6 */
#define FCMPED 0x056 /* v6 */
#define FMOVS 0x001 /* v6 */
#define FNEGS 0x005 /* v6 */
#define FITOS 0x0c4 /* v6 */
#define FITOD 0x0c8 /* v6 */
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs);
/* Unlike the Sparc64 version (which has a struct fpustate), we
* pass the taskstruct corresponding to the task which currently owns the
* FPU. This is partly because we don't have the fpustate struct and
* partly because the task owning the FPU isn't always current (as is
* the case for the Sparc64 port). This is probably SMP-related...
* This function returns 1 if all queued insns were emulated successfully.
* The test for unimplemented FPop in kernel mode has been moved into
* kernel/traps.c for simplicity.
*/
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
{
/* regs->pc isn't necessarily the PC at which the offending insn is sitting.
* The FPU maintains a queue of FPops which cause traps.
* When it hits an instruction that requires that the trapped op succeeded
* (usually because it reads a reg. that the trapped op wrote) then it
* causes this exception. We need to emulate all the insns on the queue
* and then allow the op to proceed.
* This code should also handle the case where the trap was precise,
* in which case the queue length is zero and regs->pc points at the
* single FPop to be emulated. (this case is untested, though :->)
* You'll need this case if you want to be able to emulate all FPops
* because the FPU either doesn't exist or has been software-disabled.
* [The UltraSPARC makes FP a precise trap; this isn't as stupid as it
* might sound because the Ultra does funky things with a superscalar
* architecture.]
*/
/* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */
int i;
int retcode = 0; /* assume all succeed */
unsigned long insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
for (i = 0; i < fpt->thread.fpqdepth; i++)
printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn,
(unsigned long)fpt->thread.fpqueue[i].insn_addr);
#endif
if (fpt->thread.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */
#ifdef DEBUG_MATHEMU
printk("precise trap at %08lx\n", regs->pc);
#endif
if (!get_user(insn, (u32 __user *) regs->pc)) {
retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs);
if (retcode) {
/* in this case we need to fix up PC & nPC */
regs->pc = regs->npc;
regs->npc += 4;
}
}
return retcode;
}
/* Normal case: need to empty the queue... */
for (i = 0; i < fpt->thread.fpqdepth; i++) {
retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs);
if (!retcode) /* insn failed, no point doing any more */
break;
}
/* Now empty the queue and clear the queue_not_empty flag */
if (retcode)
fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK);
else
fpt->thread.fsr &= ~0x3000;
fpt->thread.fpqdepth = 0;
return retcode;
}
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
static inline int record_exception(unsigned long *pfsr, int eflag)
{
unsigned long fsr = *pfsr;
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if (would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if ((eflag & (eflag - 1)) != 0) {
if (eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if (eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if (eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if (eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if (eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if (would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if (would_trap != 0)
fsr |= (1UL << 14);
*pfsr = fsr;
return (would_trap ? 0 : 1);
}
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
{
/* Emulate the given insn, updating fsr and fregs appropriately. */
int type = 0;
/* r is rd, b is rs2 and a is rs1. The *u arg tells
whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad) */
#define TYPE(dummy, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6)
int freg;
argp rs1 = NULL, rs2 = NULL, rd = NULL;
FP_DECL_EX;
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
int IR;
long fsr;
#ifdef DEBUG_MATHEMU
printk("In do_mathemu(), emulating %08lx\n", insn);
#endif
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
case FADDQ:
case FSUBQ:
case FMULQ:
case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
case FQTOS: TYPE(3,1,1,3,1,0,0); break;
case FQTOD: TYPE(3,2,1,3,1,0,0); break;
case FITOQ: TYPE(3,3,1,1,0,0,0); break;
case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
case FQTOI: TYPE(3,1,0,3,1,0,0); break;
case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
case FADDD:
case FSUBD:
case FMULD:
case FDIVD: TYPE(2,2,1,2,1,2,1); break;
case FADDS:
case FSUBS:
case FMULS:
case FDIVS: TYPE(2,1,1,1,1,1,1); break;
case FSMULD: TYPE(2,2,1,1,1,1,1); break;
case FDTOS: TYPE(2,1,1,2,1,0,0); break;
case FSTOD: TYPE(2,2,1,1,1,0,0); break;
case FSTOI: TYPE(2,1,0,1,1,0,0); break;
case FDTOI: TYPE(2,1,0,2,1,0,0); break;
case FITOS: TYPE(2,1,1,1,0,0,0); break;
case FITOD: TYPE(2,2,1,1,0,0,0); break;
case FMOVS:
case FABSS:
case FNEGS: TYPE(2,1,0,1,0,0,0); break;
}
} else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
switch ((insn >> 5) & 0x1ff) {
case FCMPS: TYPE(3,0,0,1,1,1,1); break;
case FCMPES: TYPE(3,0,0,1,1,1,1); break;
case FCMPD: TYPE(3,0,0,2,1,2,1); break;
case FCMPED: TYPE(3,0,0,2,1,2,1); break;
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
}
}
if (!type) { /* oops, didn't recognise that FPop */
#ifdef DEBUG_MATHEMU
printk("attempt to emulate unrecognised FPop!\n");
#endif
return 0;
}
/* Decode the registers to be used */
freg = (*pfsr >> 14) & 0xf;
*pfsr &= ~0x1c000; /* clear the traptype bits */
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) { /* is rs1 single, double or quad? */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs1 = (argp)&fregs[freg];
switch (type & 0x7) {
case 7: FP_UNPACK_QP (QA, rs1); break;
case 6: FP_UNPACK_DP (DA, rs1); break;
case 5: FP_UNPACK_SP (SA, rs1); break;
}
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) { /* same again for rs2 */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs2 = (argp)&fregs[freg];
switch ((type >> 3) & 0x7) {
case 7: FP_UNPACK_QP (QB, rs2); break;
case 6: FP_UNPACK_DP (DB, rs2); break;
case 5: FP_UNPACK_SP (SB, rs2); break;
}
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) { /* and finally rd. This one's a bit different */
case 0: /* dest is fcc. (this must be FCMPQ or FCMPEQ) */
if (freg) { /* V8 has only one set of condition codes, so */
/* anything but 0 in the rd field is an error */
*pfsr |= (6 << 14); /* (should probably flag as invalid opcode */
return 0; /* but SIGFPE will do :-> ) */
}
break;
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
/* fall through */
case 1:
rd = (void *)&fregs[freg];
break;
}
#ifdef DEBUG_MATHEMU
printk("executing insn...\n");
#endif
/* do the Right Thing */
switch ((insn >> 5) & 0x1ff) {
/* + */
case FADDS: FP_ADD_S (SR, SA, SB); break;
case FADDD: FP_ADD_D (DR, DA, DB); break;
case FADDQ: FP_ADD_Q (QR, QA, QB); break;
/* - */
case FSUBS: FP_SUB_S (SR, SA, SB); break;
case FSUBD: FP_SUB_D (DR, DA, DB); break;
case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
/* * */
case FMULS: FP_MUL_S (SR, SA, SB); break;
case FSMULD: FP_CONV (D, S, 2, 1, DA, SA);
FP_CONV (D, S, 2, 1, DB, SB);
case FMULD: FP_MUL_D (DR, DA, DB); break;
case FDMULQ: FP_CONV (Q, D, 4, 2, QA, DA);
FP_CONV (Q, D, 4, 2, QB, DB);
case FMULQ: FP_MUL_Q (QR, QA, QB); break;
/* / */
case FDIVS: FP_DIV_S (SR, SA, SB); break;
case FDIVD: FP_DIV_D (DR, DA, DB); break;
case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
/* sqrt */
case FSQRTS: FP_SQRT_S (SR, SB); break;
case FSQRTD: FP_SQRT_D (DR, DB); break;
case FSQRTQ: FP_SQRT_Q (QR, QB); break;
/* mov */
case FMOVS: rd->s = rs2->s; break;
case FABSS: rd->s = rs2->s & 0x7fffffff; break;
case FNEGS: rd->s = rs2->s ^ 0x80000000; break;
/* float to int */
case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
/* int to float */
case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
/* float to float */
case FSTOD: FP_CONV (D, S, 2, 1, DR, SB); break;
case FSTOQ: FP_CONV (Q, S, 4, 1, QR, SB); break;
case FDTOQ: FP_CONV (Q, D, 4, 2, QR, DB); break;
case FDTOS: FP_CONV (S, D, 1, 2, SR, DB); break;
case FQTOS: FP_CONV (S, Q, 1, 4, SR, QB); break;
case FQTOD: FP_CONV (D, Q, 2, 4, DR, QB); break;
/* comparison */
case FCMPS:
case FCMPES:
FP_CMP_S(IR, SB, SA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPES ||
FP_ISSIGNAN_S(SA) ||
FP_ISSIGNAN_S(SB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPD:
case FCMPED:
FP_CMP_D(IR, DB, DA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPED ||
FP_ISSIGNAN_D(DA) ||
FP_ISSIGNAN_D(DB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPQ:
case FCMPEQ:
FP_CMP_Q(IR, QB, QA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPEQ ||
FP_ISSIGNAN_Q(QA) ||
FP_ISSIGNAN_Q(QB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;
case 5: FP_PACK_SP (rd, SR); break;
case 6: FP_PACK_DP (rd, DR); break;
case 7: FP_PACK_QP (rd, QR); break;
}
}
if (_fex == 0)
return 1; /* success! */
return record_exception(pfsr, _fex);
}
| /*
* arch/sparc/math-emu/math.c
*
* Copyright (C) 1998 Peter Maydell (pmaydell@chiark.greenend.org.uk)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* This is a good place to start if you're trying to understand the
* emulation code, because it's pretty simple. What we do is
* essentially analyse the instruction to work out what the operation
* is and which registers are involved. We then execute the appropriate
* FXXXX function. [The floating point queue introduces a minor wrinkle;
* see below...]
* The fxxxxx.c files each emulate a single insn. They look relatively
* simple because the complexity is hidden away in an unholy tangle
* of preprocessor macros.
*
* The first layer of macros is single.h, double.h, quad.h. Generally
* these files define macros for working with floating point numbers
* of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles,
* for instance. These macros are usually defined as calls to more
* generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number
* of machine words required to store the given IEEE format is passed
* as a parameter. [double.h and co check the number of bits in a word
* and define FP_ADD_D & co appropriately].
* The generic macros are defined in op-common.h. This is where all
* the grotty stuff like handling NaNs is coded. To handle the possible
* word sizes macros in op-common.h use macros like _FP_FRAC_SLL_##wc()
* where wc is the 'number of machine words' parameter (here 2).
* These are defined in the third layer of macros: op-1.h, op-2.h
* and op-4.h. These handle operations on floating point numbers composed
* of 1,2 and 4 machine words respectively. [For example, on sparc64
* doubles are one machine word so macros in double.h eventually use
* constructs in op-1.h, but on sparc32 they use op-2.h definitions.]
* soft-fp.h is on the same level as op-common.h, and defines some
* macros which are independent of both word size and FP format.
* Finally, sfp-machine.h is the machine dependent part of the
* code: it defines the word size and what type a word is. It also
* defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h
* provide several possible flavours of multiply algorithm, most
* of which require that you supply some form of asm or C primitive to
* do the actual multiply. (such asm primitives should be defined
* in sfp-machine.h too). udivmodti4.c is the same sort of thing.
*
* There may be some errors here because I'm working from a
* SPARC architecture manual V9, and what I really want is V8...
* Also, the insns which can generate exceptions seem to be a
* greater subset of the FPops than for V9 (for example, FCMPED
* has to be emulated on V8). So I think I'm going to have
* to emulate them all just to be on the safe side...
*
* Emulation routines originate from soft-fp package, which is
* part of glibc and has appropriate copyrights in it (allegedly).
*
* NB: on sparc int == long == 4 bytes, long long == 8 bytes.
* Most bits of the kernel seem to go for long rather than int,
* so we follow that practice...
*/
/* TODO:
* fpsave() saves the FP queue but fpload() doesn't reload it.
* Therefore when we context switch or change FPU ownership
* we have to check to see if the queue had anything in it and
* emulate it if it did. This is going to be a pain.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
#define FLOATFUNC(x) extern int x(void *,void *,void *)
/* The Vn labels indicate what version of the SPARC architecture gas thinks
* each insn is. This is from the binutils source :->
*/
/* quadword instructions */
#define FSQRTQ 0x02b /* v8 */
#define FADDQ 0x043 /* v8 */
#define FSUBQ 0x047 /* v8 */
#define FMULQ 0x04b /* v8 */
#define FDIVQ 0x04f /* v8 */
#define FDMULQ 0x06e /* v8 */
#define FQTOS 0x0c7 /* v8 */
#define FQTOD 0x0cb /* v8 */
#define FITOQ 0x0cc /* v8 */
#define FSTOQ 0x0cd /* v8 */
#define FDTOQ 0x0ce /* v8 */
#define FQTOI 0x0d3 /* v8 */
#define FCMPQ 0x053 /* v8 */
#define FCMPEQ 0x057 /* v8 */
/* single/double instructions (subnormal): should all work */
#define FSQRTS 0x029 /* v7 */
#define FSQRTD 0x02a /* v7 */
#define FADDS 0x041 /* v6 */
#define FADDD 0x042 /* v6 */
#define FSUBS 0x045 /* v6 */
#define FSUBD 0x046 /* v6 */
#define FMULS 0x049 /* v6 */
#define FMULD 0x04a /* v6 */
#define FDIVS 0x04d /* v6 */
#define FDIVD 0x04e /* v6 */
#define FSMULD 0x069 /* v6 */
#define FDTOS 0x0c6 /* v6 */
#define FSTOD 0x0c9 /* v6 */
#define FSTOI 0x0d1 /* v6 */
#define FDTOI 0x0d2 /* v6 */
#define FABSS 0x009 /* v6 */
#define FCMPS 0x051 /* v6 */
#define FCMPES 0x055 /* v6 */
#define FCMPD 0x052 /* v6 */
#define FCMPED 0x056 /* v6 */
#define FMOVS 0x001 /* v6 */
#define FNEGS 0x005 /* v6 */
#define FITOS 0x0c4 /* v6 */
#define FITOD 0x0c8 /* v6 */
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs);
/* Unlike the Sparc64 version (which has a struct fpustate), we
* pass the taskstruct corresponding to the task which currently owns the
* FPU. This is partly because we don't have the fpustate struct and
* partly because the task owning the FPU isn't always current (as is
* the case for the Sparc64 port). This is probably SMP-related...
* This function returns 1 if all queued insns were emulated successfully.
* The test for unimplemented FPop in kernel mode has been moved into
* kernel/traps.c for simplicity.
*/
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
{
/* regs->pc isn't necessarily the PC at which the offending insn is sitting.
* The FPU maintains a queue of FPops which cause traps.
* When it hits an instruction that requires that the trapped op succeeded
* (usually because it reads a reg. that the trapped op wrote) then it
* causes this exception. We need to emulate all the insns on the queue
* and then allow the op to proceed.
* This code should also handle the case where the trap was precise,
* in which case the queue length is zero and regs->pc points at the
* single FPop to be emulated. (this case is untested, though :->)
* You'll need this case if you want to be able to emulate all FPops
* because the FPU either doesn't exist or has been software-disabled.
* [The UltraSPARC makes FP a precise trap; this isn't as stupid as it
* might sound because the Ultra does funky things with a superscalar
* architecture.]
*/
/* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */
int i;
int retcode = 0; /* assume all succeed */
unsigned long insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
for (i = 0; i < fpt->thread.fpqdepth; i++)
printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn,
(unsigned long)fpt->thread.fpqueue[i].insn_addr);
#endif
if (fpt->thread.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */
#ifdef DEBUG_MATHEMU
printk("precise trap at %08lx\n", regs->pc);
#endif
if (!get_user(insn, (u32 __user *) regs->pc)) {
retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs);
if (retcode) {
/* in this case we need to fix up PC & nPC */
regs->pc = regs->npc;
regs->npc += 4;
}
}
return retcode;
}
/* Normal case: need to empty the queue... */
for (i = 0; i < fpt->thread.fpqdepth; i++) {
retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs);
if (!retcode) /* insn failed, no point doing any more */
break;
}
/* Now empty the queue and clear the queue_not_empty flag */
if (retcode)
fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK);
else
fpt->thread.fsr &= ~0x3000;
fpt->thread.fpqdepth = 0;
return retcode;
}
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
static inline int record_exception(unsigned long *pfsr, int eflag)
{
unsigned long fsr = *pfsr;
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if (would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if ((eflag & (eflag - 1)) != 0) {
if (eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if (eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if (eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if (eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if (eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if (would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if (would_trap != 0)
fsr |= (1UL << 14);
*pfsr = fsr;
return (would_trap ? 0 : 1);
}
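/* Example: with only the overflow trap enabled in TEM, an op raising
 * overflow+inexact sets would_trap, narrows eflag to FP_EX_OVERFLOW,
 * records just that bit in CEXC, marks ftt IEEE (1 << 14) and returns
 * 0 so that a SIGFPE is delivered.
 */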
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
{
/* Emulate the given insn, updating fsr and fregs appropriately. */
int type = 0;
/* r is rd, b is rs2 and a is rs1. The *u arg tells
whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad) */
#define TYPE(dummy, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6)
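/* Worked example: FADDQ uses TYPE(3,3,1,3,1,3,1), giving type == 0777
 * octal: rs1 and rs2 are unpacked as quads ((type & 0x7) and
 * ((type >> 3) & 0x7) both 7) and the result is packed back as a quad
 * ((type >> 6) & 0x7 == 7).
 */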
int freg;
argp rs1 = NULL, rs2 = NULL, rd = NULL;
FP_DECL_EX;
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
int IR;
long fsr;
#ifdef DEBUG_MATHEMU
printk("In do_mathemu(), emulating %08lx\n", insn);
#endif
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
case FADDQ:
case FSUBQ:
case FMULQ:
case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
case FQTOS: TYPE(3,1,1,3,1,0,0); break;
case FQTOD: TYPE(3,2,1,3,1,0,0); break;
case FITOQ: TYPE(3,3,1,1,0,0,0); break;
case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
case FQTOI: TYPE(3,1,0,3,1,0,0); break;
case FSQRTS: TYPE(2,1,1,1,1,0,0); break;
case FSQRTD: TYPE(2,2,1,2,1,0,0); break;
case FADDD:
case FSUBD:
case FMULD:
case FDIVD: TYPE(2,2,1,2,1,2,1); break;
case FADDS:
case FSUBS:
case FMULS:
case FDIVS: TYPE(2,1,1,1,1,1,1); break;
case FSMULD: TYPE(2,2,1,1,1,1,1); break;
case FDTOS: TYPE(2,1,1,2,1,0,0); break;
case FSTOD: TYPE(2,2,1,1,1,0,0); break;
case FSTOI: TYPE(2,1,0,1,1,0,0); break;
case FDTOI: TYPE(2,1,0,2,1,0,0); break;
case FITOS: TYPE(2,1,1,1,0,0,0); break;
case FITOD: TYPE(2,2,1,1,0,0,0); break;
case FMOVS:
case FABSS:
case FNEGS: TYPE(2,1,0,1,0,0,0); break;
}
} else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
switch ((insn >> 5) & 0x1ff) {
case FCMPS: TYPE(3,0,0,1,1,1,1); break;
case FCMPES: TYPE(3,0,0,1,1,1,1); break;
case FCMPD: TYPE(3,0,0,2,1,2,1); break;
case FCMPED: TYPE(3,0,0,2,1,2,1); break;
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
}
}
if (!type) { /* oops, didn't recognise that FPop */
#ifdef DEBUG_MATHEMU
printk("attempt to emulate unrecognised FPop!\n");
#endif
return 0;
}
/* Decode the registers to be used */
freg = (*pfsr >> 14) & 0xf;
*pfsr &= ~0x1c000; /* clear the traptype bits */
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) { /* is rs1 single, double or quad? */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs1 = (argp)&fregs[freg];
switch (type & 0x7) {
case 7: FP_UNPACK_QP (QA, rs1); break;
case 6: FP_UNPACK_DP (DA, rs1); break;
case 5: FP_UNPACK_SP (SA, rs1); break;
}
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) { /* same again for rs2 */
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
}
rs2 = (argp)&fregs[freg];
switch ((type >> 3) & 0x7) {
case 7: FP_UNPACK_QP (QB, rs2); break;
case 6: FP_UNPACK_DP (DB, rs2); break;
case 5: FP_UNPACK_SP (SB, rs2); break;
}
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) { /* and finally rd. This one's a bit different */
case 0: /* dest is fcc. (this must be FCMPQ or FCMPEQ) */
if (freg) { /* V8 has only one set of condition codes, so */
/* anything but 0 in the rd field is an error */
*pfsr |= (6 << 14); /* (should probably flag as invalid opcode */
return 0; /* but SIGFPE will do :-> ) */
}
break;
case 3:
if (freg & 3) { /* quadwords must have bits 4&5 of the */
/* encoded reg. number set to zero. */
*pfsr |= (6 << 14);
return 0; /* simulate invalid_fp_register exception */
}
/* fall through */
case 2:
if (freg & 1) { /* doublewords must have bit 5 zeroed */
*pfsr |= (6 << 14);
return 0;
}
/* fall through */
case 1:
rd = (void *)&fregs[freg];
break;
}
#ifdef DEBUG_MATHEMU
printk("executing insn...\n");
#endif
/* do the Right Thing */
switch ((insn >> 5) & 0x1ff) {
/* + */
case FADDS: FP_ADD_S (SR, SA, SB); break;
case FADDD: FP_ADD_D (DR, DA, DB); break;
case FADDQ: FP_ADD_Q (QR, QA, QB); break;
/* - */
case FSUBS: FP_SUB_S (SR, SA, SB); break;
case FSUBD: FP_SUB_D (DR, DA, DB); break;
case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
/* * */
case FMULS: FP_MUL_S (SR, SA, SB); break;
case FSMULD: FP_CONV (D, S, 2, 1, DA, SA);
FP_CONV (D, S, 2, 1, DB, SB);
case FMULD: FP_MUL_D (DR, DA, DB); break;
case FDMULQ: FP_CONV (Q, D, 4, 2, QA, DA);
FP_CONV (Q, D, 4, 2, QB, DB);
case FMULQ: FP_MUL_Q (QR, QA, QB); break;
/* / */
case FDIVS: FP_DIV_S (SR, SA, SB); break;
case FDIVD: FP_DIV_D (DR, DA, DB); break;
case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
/* sqrt */
case FSQRTS: FP_SQRT_S (SR, SB); break;
case FSQRTD: FP_SQRT_D (DR, DB); break;
case FSQRTQ: FP_SQRT_Q (QR, QB); break;
/* mov */
case FMOVS: rd->s = rs2->s; break;
case FABSS: rd->s = rs2->s & 0x7fffffff; break;
case FNEGS: rd->s = rs2->s ^ 0x80000000; break;
/* float to int */
case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
/* int to float */
case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
/* float to float */
case FSTOD: FP_CONV (D, S, 2, 1, DR, SB); break;
case FSTOQ: FP_CONV (Q, S, 4, 1, QR, SB); break;
case FDTOQ: FP_CONV (Q, D, 4, 2, QR, DB); break;
case FDTOS: FP_CONV (S, D, 1, 2, SR, DB); break;
case FQTOS: FP_CONV (S, Q, 1, 4, SR, QB); break;
case FQTOD: FP_CONV (D, Q, 2, 4, DR, QB); break;
/* comparison */
case FCMPS:
case FCMPES:
FP_CMP_S(IR, SB, SA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPES ||
FP_ISSIGNAN_S(SA) ||
FP_ISSIGNAN_S(SB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPD:
case FCMPED:
FP_CMP_D(IR, DB, DA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPED ||
FP_ISSIGNAN_D(DA) ||
FP_ISSIGNAN_D(DB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
break;
case FCMPQ:
case FCMPEQ:
FP_CMP_Q(IR, QB, QA, 3);
if (IR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPEQ ||
FP_ISSIGNAN_Q(QA) ||
FP_ISSIGNAN_Q(QB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: fsr = *pfsr;
if (IR == -1) IR = 2;
/* fcc is always fcc0 */
fsr &= ~0xc00; fsr |= (IR << 10);
*pfsr = fsr;
break;
case 1: rd->s = IR; break;
case 5: FP_PACK_SP (rd, SR); break;
case 6: FP_PACK_DP (rd, DR); break;
case 7: FP_PACK_QP (rd, QR); break;
}
}
if (_fex == 0)
return 1; /* success! */
return record_exception(pfsr, _fex);
}
|
159,495,417,440,988 | /*
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* Emulation routines originate from soft-fp package, which is part
* of glibc and has appropriate copyrights in it.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include "sfp-util_64.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
/* QUAD - ftt == 3 */
#define FMOVQ 0x003
#define FNEGQ 0x007
#define FABSQ 0x00b
#define FSQRTQ 0x02b
#define FADDQ 0x043
#define FSUBQ 0x047
#define FMULQ 0x04b
#define FDIVQ 0x04f
#define FDMULQ 0x06e
#define FQTOX 0x083
#define FXTOQ 0x08c
#define FQTOS 0x0c7
#define FQTOD 0x0cb
#define FITOQ 0x0cc
#define FSTOQ 0x0cd
#define FDTOQ 0x0ce
#define FQTOI 0x0d3
/* SUBNORMAL - ftt == 2 */
#define FSQRTS 0x029
#define FSQRTD 0x02a
#define FADDS 0x041
#define FADDD 0x042
#define FSUBS 0x045
#define FSUBD 0x046
#define FMULS 0x049
#define FMULD 0x04a
#define FDIVS 0x04d
#define FDIVD 0x04e
#define FSMULD 0x069
#define FSTOX 0x081
#define FDTOX 0x082
#define FDTOS 0x0c6
#define FSTOD 0x0c9
#define FSTOI 0x0d1
#define FDTOI 0x0d2
#define FXTOS 0x084 /* Only Ultra-III generates this. */
#define FXTOD 0x088 /* Only Ultra-III generates this. */
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
#define FITOS 0x0c4 /* Only Ultra-III generates this. */
#endif
#define FITOD 0x0c8 /* Only Ultra-III generates this. */
/* FPOP2 */
#define FCMPQ 0x053
#define FCMPEQ 0x057
#define FMOVQ0 0x003
#define FMOVQ1 0x043
#define FMOVQ2 0x083
#define FMOVQ3 0x0c3
#define FMOVQI 0x103
#define FMOVQX 0x183
#define FMOVQZ 0x027
#define FMOVQLE 0x047
#define FMOVQLZ 0x067
#define FMOVQNZ 0x0a7
#define FMOVQGZ 0x0c7
#define FMOVQGE 0x0e7
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
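/* Worked values of the masks above, for reference:
 *   FSR_TEM_MASK  = 0x1f << 23 = 0x0f800000  (trap enable mask)
 *   FSR_AEXC_MASK = 0x1f <<  5 = 0x000003e0  (accrued exceptions)
 *   FSR_CEXC_MASK = 0x1f <<  0 = 0x0000001f  (current exceptions)
 */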
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
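/* Illustrative walk-through (an added example, assuming the FP_EX_* bits
 * line up with the FSR exception bits as they do on sparc): suppose
 * eflag is FP_EX_OVERFLOW | FP_EX_INEXACT and TEM enables only overflow.
 * Then would_trap is true, masking eflag by TEM leaves just
 * FP_EX_OVERFLOW (the priority chain below narrows things further when
 * several enabled bits survive), CEXC becomes that single bit, AEXC is
 * untouched, ftt is set to 1 (IEEE_754_exception), the PC is not
 * advanced, and 0 is returned so the caller sends SIGFPE. */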
static inline int record_exception(struct pt_regs *regs, int eflag)
{
u64 fsr = current_thread_info()->xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if(would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if((eflag & (eflag - 1)) != 0) {
if(eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if(eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if(eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if(eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if(eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if(would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if(would_trap != 0)
fsr |= (1UL << 14);
current_thread_info()->xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
*/
if(would_trap == 0) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
return (would_trap ? 0 : 1);
}
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
int do_mathemu(struct pt_regs *regs, struct fpustate *f)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn = 0;
int type = 0;
/* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u args tell
whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack);
the non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad). */
#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
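/* Worked example of the encoding: FADDQ uses TYPE(3,3,1,3,1,3,1)
 * (ftt 3, quad rd packed, quad rs2 unpacked, quad rs1 unpacked), which
 * packs to
 *     type = (1<<2)|(3<<0)|(1<<5)|(3<<3)|(1<<8)|(3<<6)|(3<<9) = 0x7ff,
 * so (type & 7) == 7 selects FP_UNPACK_QP for rs1, ((type >> 3) & 7) == 7
 * does the same for rs2, ((type >> 6) & 7) == 7 selects FP_PACK_QP for
 * rd, and (type >> 9) == 3 is the ftt the trap must report. */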
int freg;
static u64 zero[2] = { 0L, 0L };
int flags;
FP_DECL_EX;
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
int IR;
long XR, xfsr;
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
/* QUAD - ftt == 3 */
case FMOVQ:
case FNEGQ:
case FABSQ: TYPE(3,3,0,3,0,0,0); break;
case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
case FADDQ:
case FSUBQ:
case FMULQ:
case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
case FQTOX: TYPE(3,2,0,3,1,0,0); break;
case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
case FQTOS: TYPE(3,1,1,3,1,0,0); break;
case FQTOD: TYPE(3,2,1,3,1,0,0); break;
case FITOQ: TYPE(3,3,1,1,0,0,0); break;
case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
case FQTOI: TYPE(3,1,0,3,1,0,0); break;
/* We can get either unimplemented or unfinished
* for these cases. Pre-Niagara systems generate
* unfinished fpop for SUBNORMAL cases, and Niagara
* always gives unimplemented fpop for fsqrt{s,d}.
*/
case FSQRTS: {
unsigned long x = current_thread_info()->xfsr[0];
x = (x >> 14) & 0xf;
TYPE(x,1,1,1,1,0,0);
break;
}
case FSQRTD: {
unsigned long x = current_thread_info()->xfsr[0];
x = (x >> 14) & 0xf;
TYPE(x,2,1,2,1,0,0);
break;
}
/* SUBNORMAL - ftt == 2 */
case FADDD:
case FSUBD:
case FMULD:
case FDIVD: TYPE(2,2,1,2,1,2,1); break;
case FADDS:
case FSUBS:
case FMULS:
case FDIVS: TYPE(2,1,1,1,1,1,1); break;
case FSMULD: TYPE(2,2,1,1,1,1,1); break;
case FSTOX: TYPE(2,2,0,1,1,0,0); break;
case FDTOX: TYPE(2,2,0,2,1,0,0); break;
case FDTOS: TYPE(2,1,1,2,1,0,0); break;
case FSTOD: TYPE(2,2,1,1,1,0,0); break;
case FSTOI: TYPE(2,1,0,1,1,0,0); break;
case FDTOI: TYPE(2,1,0,2,1,0,0); break;
/* Only Ultra-III generates these */
case FXTOS: TYPE(2,1,1,2,0,0,0); break;
case FXTOD: TYPE(2,2,1,2,0,0,0); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
case FITOS: TYPE(2,1,1,1,0,0,0); break;
#endif
case FITOD: TYPE(2,2,1,1,0,0,0); break;
}
}
else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
IR = 2;
switch ((insn >> 5) & 0x1ff) {
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
/* Now the conditional fmovq support */
case FMOVQ0:
case FMOVQ1:
case FMOVQ2:
case FMOVQ3:
/* fmovq %fccX, %fY, %fZ */
if (!((insn >> 11) & 3))
XR = current_thread_info()->xfsr[0] >> 10;
else
XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
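/* The shift amounts decoded here: fcc0 sits at xfsr bits 11:10 (shift
 * 10), while fcc1, fcc2 and fcc3 sit at bits 33:32, 35:34 and 37:36,
 * and 30 + ((insn >> 10) & 0x6) yields 32, 34 and 36 for them. */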
XR &= 3;
IR = 0;
switch ((insn >> 14) & 0x7) {
/* case 0: IR = 0; break; */ /* Never */
case 1: if (XR) IR = 1; break; /* Not Equal */
case 2: if (XR == 1 || XR == 2) IR = 1; break; /* Less or Greater */
case 3: if (XR & 1) IR = 1; break; /* Unordered or Less */
case 4: if (XR == 1) IR = 1; break; /* Less */
case 5: if (XR & 2) IR = 1; break; /* Unordered or Greater */
case 6: if (XR == 2) IR = 1; break; /* Greater */
case 7: if (XR == 3) IR = 1; break; /* Unordered */
}
if ((insn >> 14) & 8)
IR ^= 1;
break;
case FMOVQI:
case FMOVQX:
/* fmovq %[ix]cc, %fY, %fZ */
XR = regs->tstate >> 32;
if ((insn >> 5) & 0x80)
XR >>= 4;
XR &= 0xf;
IR = 0;
freg = ((XR >> 2) ^ XR) & 2;
switch ((insn >> 14) & 0x7) {
/* case 0: IR = 0; break; */ /* Never */
case 1: if (XR & 4) IR = 1; break; /* Equal */
case 2: if ((XR & 4) || freg) IR = 1; break; /* Less or Equal */
case 3: if (freg) IR = 1; break; /* Less */
case 4: if (XR & 5) IR = 1; break; /* Less or Equal Unsigned */
case 5: if (XR & 1) IR = 1; break; /* Carry Set */
case 6: if (XR & 8) IR = 1; break; /* Negative */
case 7: if (XR & 2) IR = 1; break; /* Overflow Set */
}
if ((insn >> 14) & 8)
IR ^= 1;
break;
case FMOVQZ:
case FMOVQLE:
case FMOVQLZ:
case FMOVQNZ:
case FMOVQGZ:
case FMOVQGE:
freg = (insn >> 14) & 0x1f;
if (!freg)
XR = 0;
else if (freg < 16)
XR = regs->u_regs[freg];
else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
flushw_user ();
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(XR, &win32->locals[freg - 16]);
} else {
struct reg_window __user *win;
flushw_user ();
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(XR, &win->locals[freg - 16]);
}
IR = 0;
switch ((insn >> 10) & 3) {
case 1: if (!XR) IR = 1; break; /* Register Zero */
case 2: if (XR <= 0) IR = 1; break; /* Register Less Than or Equal to Zero */
case 3: if (XR < 0) IR = 1; break; /* Register Less Than Zero */
}
if ((insn >> 10) & 4)
IR ^= 1;
break;
}
if (IR == 0) {
/* The fmov test was false. Do a nop instead */
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
} else if (IR == 1) {
/* Change the instruction into plain fmovq */
insn = (insn & 0x3e00001f) | 0x81a00060;
TYPE(3,3,0,3,0,0,0);
}
}
}
if (type) {
argp rs1 = NULL, rs2 = NULL, rd = NULL;
freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
if (freg != (type >> 9))
goto err;
current_thread_info()->xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs1 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & flags))
rs1 = (argp)&zero;
break;
}
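/* Example of the V9 register-field decode above: a double such as %f34
 * (34 = 0b100010) is encoded in the 5-bit field as
 * (34 & 0x1e) | (34 >> 5) = 0x03, and ((freg & 1) << 5) | (freg & 0x1e)
 * maps 0x03 back to 34.  A quad operand must additionally have bit 1 of
 * the field clear, hence the freg & 2 check. */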
switch (type & 0x7) {
case 7: FP_UNPACK_QP (QA, rs1); break;
case 6: FP_UNPACK_DP (DA, rs1); break;
case 5: FP_UNPACK_SP (SA, rs1); break;
}
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs2 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & flags))
rs2 = (argp)&zero;
break;
}
switch ((type >> 3) & 0x7) {
case 7: FP_UNPACK_QP (QB, rs2); break;
case 6: FP_UNPACK_DP (DB, rs2); break;
case 5: FP_UNPACK_SP (SB, rs2); break;
}
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rd = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flags)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
current_thread_info()->fpsaved[0] |= flags;
break;
}
switch ((insn >> 5) & 0x1ff) {
/* + */
case FADDS: FP_ADD_S (SR, SA, SB); break;
case FADDD: FP_ADD_D (DR, DA, DB); break;
case FADDQ: FP_ADD_Q (QR, QA, QB); break;
/* - */
case FSUBS: FP_SUB_S (SR, SA, SB); break;
case FSUBD: FP_SUB_D (DR, DA, DB); break;
case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
/* * */
case FMULS: FP_MUL_S (SR, SA, SB); break;
case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
FP_CONV (D, S, 1, 1, DB, SB);
case FMULD: FP_MUL_D (DR, DA, DB); break;
case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
FP_CONV (Q, D, 2, 1, QB, DB);
case FMULQ: FP_MUL_Q (QR, QA, QB); break;
/* / */
case FDIVS: FP_DIV_S (SR, SA, SB); break;
case FDIVD: FP_DIV_D (DR, DA, DB); break;
case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
/* sqrt */
case FSQRTS: FP_SQRT_S (SR, SB); break;
case FSQRTD: FP_SQRT_D (DR, DB); break;
case FSQRTQ: FP_SQRT_Q (QR, QB); break;
/* mov */
case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
/* float to int */
case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
/* int to float */
case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
/* Only Ultra-III generates these */
case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
#endif
case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
/* float to float */
case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
/* comparison */
case FCMPQ:
case FCMPEQ:
FP_CMP_Q(XR, QB, QA, 3);
if (XR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPEQ ||
FP_ISSIGNAN_Q(QA) ||
FP_ISSIGNAN_Q(QB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: xfsr = current_thread_info()->xfsr[0];
if (XR == -1) XR = 2;
switch (freg & 3) {
/* fcc0, 1, 2, 3 */
case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
}
current_thread_info()->xfsr[0] = xfsr;
break;
case 1: rd->s = IR; break;
case 2: rd->d = XR; break;
case 5: FP_PACK_SP (rd, SR); break;
case 6: FP_PACK_DP (rd, DR); break;
case 7: FP_PACK_QP (rd, QR); break;
}
}
if(_fex != 0)
return record_exception(regs, _fex);
/* Success and no exceptions detected. */
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
}
err: return 0;
}
| /*
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* Emulation routines originate from soft-fp package, which is part
* of glibc and has appropriate copyrights in it.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include "sfp-util_64.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
/* QUAD - ftt == 3 */
#define FMOVQ 0x003
#define FNEGQ 0x007
#define FABSQ 0x00b
#define FSQRTQ 0x02b
#define FADDQ 0x043
#define FSUBQ 0x047
#define FMULQ 0x04b
#define FDIVQ 0x04f
#define FDMULQ 0x06e
#define FQTOX 0x083
#define FXTOQ 0x08c
#define FQTOS 0x0c7
#define FQTOD 0x0cb
#define FITOQ 0x0cc
#define FSTOQ 0x0cd
#define FDTOQ 0x0ce
#define FQTOI 0x0d3
/* SUBNORMAL - ftt == 2 */
#define FSQRTS 0x029
#define FSQRTD 0x02a
#define FADDS 0x041
#define FADDD 0x042
#define FSUBS 0x045
#define FSUBD 0x046
#define FMULS 0x049
#define FMULD 0x04a
#define FDIVS 0x04d
#define FDIVD 0x04e
#define FSMULD 0x069
#define FSTOX 0x081
#define FDTOX 0x082
#define FDTOS 0x0c6
#define FSTOD 0x0c9
#define FSTOI 0x0d1
#define FDTOI 0x0d2
#define FXTOS 0x084 /* Only Ultra-III generates this. */
#define FXTOD 0x088 /* Only Ultra-III generates this. */
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
#define FITOS 0x0c4 /* Only Ultra-III generates this. */
#endif
#define FITOD 0x0c8 /* Only Ultra-III generates this. */
/* FPOP2 */
#define FCMPQ 0x053
#define FCMPEQ 0x057
#define FMOVQ0 0x003
#define FMOVQ1 0x043
#define FMOVQ2 0x083
#define FMOVQ3 0x0c3
#define FMOVQI 0x103
#define FMOVQX 0x183
#define FMOVQZ 0x027
#define FMOVQLE 0x047
#define FMOVQLZ 0x067
#define FMOVQNZ 0x0a7
#define FMOVQGZ 0x0c7
#define FMOVQGE 0x0e7
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
static inline int record_exception(struct pt_regs *regs, int eflag)
{
u64 fsr = current_thread_info()->xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if(would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
if((eflag & (eflag - 1)) != 0) {
if(eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if(eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if(eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if(eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if(eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if(would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if(would_trap != 0)
fsr |= (1UL << 14);
current_thread_info()->xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
*/
if(would_trap == 0) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
return (would_trap ? 0 : 1);
}
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
int do_mathemu(struct pt_regs *regs, struct fpustate *f)
{
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn = 0;
int type = 0;
/* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u args tell
whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack);
the non-u args tell the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad). */
#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
int freg;
static u64 zero[2] = { 0L, 0L };
int flags;
FP_DECL_EX;
FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
int IR;
long XR, xfsr;
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
switch ((insn >> 5) & 0x1ff) {
/* QUAD - ftt == 3 */
case FMOVQ:
case FNEGQ:
case FABSQ: TYPE(3,3,0,3,0,0,0); break;
case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
case FADDQ:
case FSUBQ:
case FMULQ:
case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
case FQTOX: TYPE(3,2,0,3,1,0,0); break;
case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
case FQTOS: TYPE(3,1,1,3,1,0,0); break;
case FQTOD: TYPE(3,2,1,3,1,0,0); break;
case FITOQ: TYPE(3,3,1,1,0,0,0); break;
case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
case FQTOI: TYPE(3,1,0,3,1,0,0); break;
/* We can get either unimplemented or unfinished
* for these cases. Pre-Niagara systems generate
* unfinished fpop for SUBNORMAL cases, and Niagara
* always gives unimplemented fpop for fsqrt{s,d}.
*/
case FSQRTS: {
unsigned long x = current_thread_info()->xfsr[0];
x = (x >> 14) & 0xf;
TYPE(x,1,1,1,1,0,0);
break;
}
case FSQRTD: {
unsigned long x = current_thread_info()->xfsr[0];
x = (x >> 14) & 0xf;
TYPE(x,2,1,2,1,0,0);
break;
}
/* SUBNORMAL - ftt == 2 */
case FADDD:
case FSUBD:
case FMULD:
case FDIVD: TYPE(2,2,1,2,1,2,1); break;
case FADDS:
case FSUBS:
case FMULS:
case FDIVS: TYPE(2,1,1,1,1,1,1); break;
case FSMULD: TYPE(2,2,1,1,1,1,1); break;
case FSTOX: TYPE(2,2,0,1,1,0,0); break;
case FDTOX: TYPE(2,2,0,2,1,0,0); break;
case FDTOS: TYPE(2,1,1,2,1,0,0); break;
case FSTOD: TYPE(2,2,1,1,1,0,0); break;
case FSTOI: TYPE(2,1,0,1,1,0,0); break;
case FDTOI: TYPE(2,1,0,2,1,0,0); break;
/* Only Ultra-III generates these */
case FXTOS: TYPE(2,1,1,2,0,0,0); break;
case FXTOD: TYPE(2,2,1,2,0,0,0); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
case FITOS: TYPE(2,1,1,1,0,0,0); break;
#endif
case FITOD: TYPE(2,2,1,1,0,0,0); break;
}
}
else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
IR = 2;
switch ((insn >> 5) & 0x1ff) {
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
/* Now the conditional fmovq support */
case FMOVQ0:
case FMOVQ1:
case FMOVQ2:
case FMOVQ3:
/* fmovq %fccX, %fY, %fZ */
if (!((insn >> 11) & 3))
XR = current_thread_info()->xfsr[0] >> 10;
else
XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
XR &= 3;
IR = 0;
switch ((insn >> 14) & 0x7) {
/* case 0: IR = 0; break; */ /* Never */
case 1: if (XR) IR = 1; break; /* Not Equal */
case 2: if (XR == 1 || XR == 2) IR = 1; break; /* Less or Greater */
case 3: if (XR & 1) IR = 1; break; /* Unordered or Less */
case 4: if (XR == 1) IR = 1; break; /* Less */
case 5: if (XR & 2) IR = 1; break; /* Unordered or Greater */
case 6: if (XR == 2) IR = 1; break; /* Greater */
case 7: if (XR == 3) IR = 1; break; /* Unordered */
}
if ((insn >> 14) & 8)
IR ^= 1;
break;
case FMOVQI:
case FMOVQX:
/* fmovq %[ix]cc, %fY, %fZ */
XR = regs->tstate >> 32;
if ((insn >> 5) & 0x80)
XR >>= 4;
XR &= 0xf;
IR = 0;
freg = ((XR >> 2) ^ XR) & 2;
switch ((insn >> 14) & 0x7) {
/* case 0: IR = 0; break; */ /* Never */
case 1: if (XR & 4) IR = 1; break; /* Equal */
case 2: if ((XR & 4) || freg) IR = 1; break; /* Less or Equal */
case 3: if (freg) IR = 1; break; /* Less */
case 4: if (XR & 5) IR = 1; break; /* Less or Equal Unsigned */
case 5: if (XR & 1) IR = 1; break; /* Carry Set */
case 6: if (XR & 8) IR = 1; break; /* Negative */
case 7: if (XR & 2) IR = 1; break; /* Overflow Set */
}
if ((insn >> 14) & 8)
IR ^= 1;
break;
case FMOVQZ:
case FMOVQLE:
case FMOVQLZ:
case FMOVQNZ:
case FMOVQGZ:
case FMOVQGE:
freg = (insn >> 14) & 0x1f;
if (!freg)
XR = 0;
else if (freg < 16)
XR = regs->u_regs[freg];
else if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
flushw_user ();
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(XR, &win32->locals[freg - 16]);
} else {
struct reg_window __user *win;
flushw_user ();
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
get_user(XR, &win->locals[freg - 16]);
}
IR = 0;
switch ((insn >> 10) & 3) {
case 1: if (!XR) IR = 1; break; /* Register Zero */
case 2: if (XR <= 0) IR = 1; break; /* Register Less Than or Equal to Zero */
case 3: if (XR < 0) IR = 1; break; /* Register Less Than Zero */
}
if ((insn >> 10) & 4)
IR ^= 1;
break;
}
if (IR == 0) {
/* The fmov test was false. Do a nop instead */
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
} else if (IR == 1) {
/* Change the instruction into plain fmovq */
insn = (insn & 0x3e00001f) | 0x81a00060;
TYPE(3,3,0,3,0,0,0);
}
}
}
if (type) {
argp rs1 = NULL, rs2 = NULL, rd = NULL;
freg = (current_thread_info()->xfsr[0] >> 14) & 0xf;
if (freg != (type >> 9))
goto err;
current_thread_info()->xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs1 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & flags))
rs1 = (argp)&zero;
break;
}
switch (type & 0x7) {
case 7: FP_UNPACK_QP (QA, rs1); break;
case 6: FP_UNPACK_DP (DA, rs1); break;
case 5: FP_UNPACK_SP (SA, rs1); break;
}
freg = (insn & 0x1f);
switch ((type >> 3) & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs2 = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & flags))
rs2 = (argp)&zero;
break;
}
switch ((type >> 3) & 0x7) {
case 7: FP_UNPACK_QP (QB, rs2); break;
case 6: FP_UNPACK_DP (DB, rs2); break;
case 5: FP_UNPACK_SP (SB, rs2); break;
}
freg = ((insn >> 25) & 0x1f);
switch ((type >> 6) & 0x3) {
case 3: if (freg & 2) {
current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rd = (argp)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
current_thread_info()->fpsaved[0] = FPRS_FEF;
current_thread_info()->gsr[0] = 0;
}
if (!(current_thread_info()->fpsaved[0] & flags)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
current_thread_info()->fpsaved[0] |= flags;
break;
}
switch ((insn >> 5) & 0x1ff) {
/* + */
case FADDS: FP_ADD_S (SR, SA, SB); break;
case FADDD: FP_ADD_D (DR, DA, DB); break;
case FADDQ: FP_ADD_Q (QR, QA, QB); break;
/* - */
case FSUBS: FP_SUB_S (SR, SA, SB); break;
case FSUBD: FP_SUB_D (DR, DA, DB); break;
case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
/* * */
case FMULS: FP_MUL_S (SR, SA, SB); break;
case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
FP_CONV (D, S, 1, 1, DB, SB);
case FMULD: FP_MUL_D (DR, DA, DB); break;
case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
FP_CONV (Q, D, 2, 1, QB, DB);
case FMULQ: FP_MUL_Q (QR, QA, QB); break;
/* / */
case FDIVS: FP_DIV_S (SR, SA, SB); break;
case FDIVD: FP_DIV_D (DR, DA, DB); break;
case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
/* sqrt */
case FSQRTS: FP_SQRT_S (SR, SB); break;
case FSQRTD: FP_SQRT_D (DR, DB); break;
case FSQRTQ: FP_SQRT_Q (QR, QB); break;
/* mov */
case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
/* float to int */
case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
/* int to float */
case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
/* Only Ultra-III generates these */
case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
#endif
case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
/* float to float */
case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
/* comparison */
case FCMPQ:
case FCMPEQ:
FP_CMP_Q(XR, QB, QA, 3);
if (XR == 3 &&
(((insn >> 5) & 0x1ff) == FCMPEQ ||
FP_ISSIGNAN_Q(QA) ||
FP_ISSIGNAN_Q(QB)))
FP_SET_EXCEPTION (FP_EX_INVALID);
}
if (!FP_INHIBIT_RESULTS) {
switch ((type >> 6) & 0x7) {
case 0: xfsr = current_thread_info()->xfsr[0];
if (XR == -1) XR = 2;
switch (freg & 3) {
/* fcc0, 1, 2, 3 */
case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
}
current_thread_info()->xfsr[0] = xfsr;
break;
case 1: rd->s = IR; break;
case 2: rd->d = XR; break;
case 5: FP_PACK_SP (rd, SR); break;
case 6: FP_PACK_DP (rd, DR); break;
case 7: FP_PACK_QP (rd, QR); break;
}
}
if(_fex != 0)
return record_exception(regs, _fex);
/* Success and no exceptions detected. */
current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
}
err: return 0;
}
|
125,136,270,699,143 | /*
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/head.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;
int show_unhandled_signals = 1;
/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
*/
int num_segmaps, num_contexts;
int invalid_segment;
/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
unsigned long total = 0;
int i;
for (i = 0; sp_banks[i].num_bytes; i++)
total += sp_banks[i].num_bytes;
return total;
}
extern void sun4c_complete_all_stores(void);
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
unsigned long svaddr, unsigned long aerr,
unsigned long avaddr)
{
sun4c_complete_all_stores();
printk("FAULT: NMI received\n");
printk("SREGS: Synchronous Error %08lx\n", serr);
printk(" Synchronous Vaddr %08lx\n", svaddr);
printk(" Asynchronous Error %08lx\n", aerr);
printk(" Asynchronous Vaddr %08lx\n", avaddr);
if (sun4c_memerr_reg)
printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
printk("REGISTER DUMP:\n");
show_regs(regs);
prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn));
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
struct pt_regs *regs)
{
if((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference\n");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %08lx\n", address);
}
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
unsigned long address)
{
struct pt_regs regs;
unsigned long g2;
unsigned int insn;
int i;
i = search_extables_range(ret_pc, &g2);
switch (i) {
case 3:
/* load & store will be handled by fixup */
return 3;
case 1:
/* store will be handled by fixup, load will bump out */
/* for _to_ macros */
insn = *((unsigned int *) pc);
if ((insn >> 21) & 1)
return 1;
break;
case 2:
/* load will be handled by fixup, store will bump out */
/* for _from_ macros */
insn = *((unsigned int *) pc);
if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
return 2;
break;
default:
break;
}
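/* Decoding note (an interpretation of the tests above, not from the
 * original comments): bit 21 of the instruction is bit 2 of the op3
 * field, which is set for the V8 store opcodes, and op3 == 0x0f is
 * SWAP, which both loads and stores. */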
memset(&regs, 0, sizeof (regs));
regs.pc = pc;
regs.npc = pc + 4;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"nop\n\t"
"nop\n\t"
"nop\n" : "=r" (regs.psr));
unhandled_fault(address, current, &regs);
/* Not reached */
return 0;
}
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, sig))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->pc, (void *)regs->u_regs[UREG_I7],
(void *)regs->u_regs[UREG_FP], code);
print_vma_addr(KERN_CONT " in ", regs->pc);
printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
unsigned long addr)
{
siginfo_t info;
info.si_signo = sig;
info.si_code = code;
info.si_errno = 0;
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, info.si_code,
addr, current);
force_sig_info (sig, &info, current);
}
extern unsigned long safe_compute_effective_address(struct pt_regs *,
unsigned int);
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
unsigned int insn;
if (text_fault)
return regs->pc;
if (regs->psr & PSR_PS) {
insn = *(unsigned int *) regs->pc;
} else {
__get_user(insn, (unsigned int *) regs->pc);
}
return safe_compute_effective_address(regs, insn);
}
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
int text_fault)
{
unsigned long addr = compute_si_addr(regs, text_fault);
__do_fault_siginfo(code, sig, regs, addr);
}
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
unsigned int fixup;
unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
int fault, code;
if(text_fault)
address = regs->pc;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
code = SEGV_MAPERR;
if (!ARCH_SUN4C && address >= TASK_SIZE)
goto vmalloc_fault;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
/*
* The kernel referencing a bad kernel pointer can lock up
* a sun4c machine completely, so we must attempt recovery.
*/
if(!from_user && address >= PAGE_OFFSET)
goto bad_area;
vma = find_vma(mm, address);
if(!vma)
goto bad_area;
if(vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
code = SEGV_ACCERR;
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
/* Allow reads even for write-only mappings */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (from_user) {
do_fault_siginfo(code, SIGSEGV, regs, text_fault);
return;
}
/* Is this in ex_table? */
no_context:
g2 = regs->u_regs[UREG_G2];
if (!from_user) {
fixup = search_extables_range(regs->pc, &g2);
if (fixup > 10) { /* Values below are reserved for other things */
extern const unsigned __memset_start[];
extern const unsigned __memset_end[];
extern const unsigned __csum_partial_copy_start[];
extern const unsigned __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
regs->pc, fixup, g2);
#endif
if ((regs->pc >= (unsigned long)__memset_start &&
regs->pc < (unsigned long)__memset_end) ||
(regs->pc >= (unsigned long)__csum_partial_copy_start &&
regs->pc < (unsigned long)__csum_partial_copy_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}
regs->u_regs[UREG_G2] = g2;
regs->pc = fixup;
regs->npc = regs->pc + 4;
return;
}
}
unhandled_fault (address, tsk, regs);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (from_user) {
pagefault_out_of_memory();
return;
}
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pgd = tsk->active_mm->pgd + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd)) {
if (!pgd_present(*pgd_k))
goto bad_area_nosemaphore;
pgd_val(*pgd) = pgd_val(*pgd_k);
return;
}
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore;
*pmd = *pmd_k;
return;
}
}
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
extern void sun4c_update_mmu_cache(struct vm_area_struct *,
unsigned long,pte_t *);
extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
pgd_t *pgdp;
pte_t *ptep;
if (text_fault) {
address = regs->pc;
} else if (!write &&
!(regs->psr & PSR_PS)) {
unsigned int insn, __user *ip;
ip = (unsigned int __user *)regs->pc;
if (!get_user(insn, ip)) {
if ((insn & 0xc1680000) == 0xc0680000)
write = 1;
}
}
if (!mm) {
/* We are oopsing. */
do_sparc_fault(regs, text_fault, write, address);
BUG(); /* P3 Oops already, you bitch */
}
pgdp = pgd_offset(mm, address);
ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
if (pgd_val(*pgdp)) {
if (write) {
if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_MODIFIED |
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
}
}
/* This conditional is 'interesting'. */
if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
&& (pte_val(*ptep) & _SUN4C_PAGE_VALID))
/* Note: It is safe to not grab the MMAP semaphore here because
* we know that update_mmu_cache() will not sleep for
* any reason (at least not in the current implementation)
* and therefore there is no danger of another thread getting
* on the CPU and doing a shrink_mmap() on this vma.
*/
sun4c_update_mmu_cache (find_vma(current->mm, address), address,
ptep);
else
do_sparc_fault(regs, text_fault, write, address);
}
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int code;
code = SEGV_MAPERR;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if(!vma)
goto bad_area;
if(vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
}
up_read(&mm->mmap_sem);
return;
bad_area:
up_read(&mm->mmap_sem);
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
up_read(&mm->mmap_sem);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
static void check_stack_aligned(unsigned long sp)
{
if (sp & 0x7UL)
force_sig(SIGILL, current);
}
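/* Note on the 0x38 offsets below (an observation, not from the original
 * comments): a sparc32 register window is a 16-word (64-byte) save area
 * at %sp, so touching both sp and sp + 0x38 faults in both pages when
 * the save area straddles a page boundary. */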
void window_overflow_fault(void)
{
unsigned long sp;
sp = current_thread_info()->rwbuf_stkptrs[0];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
unsigned long sp;
sp = regs->u_regs[UREG_FP];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
check_stack_aligned(sp);
}
| /*
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/head.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;
int show_unhandled_signals = 1;
/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
*/
int num_segmaps, num_contexts;
int invalid_segment;
/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
unsigned long total = 0;
int i;
for (i = 0; sp_banks[i].num_bytes; i++)
total += sp_banks[i].num_bytes;
return total;
}
extern void sun4c_complete_all_stores(void);
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
unsigned long svaddr, unsigned long aerr,
unsigned long avaddr)
{
sun4c_complete_all_stores();
printk("FAULT: NMI received\n");
printk("SREGS: Synchronous Error %08lx\n", serr);
printk(" Synchronous Vaddr %08lx\n", svaddr);
printk(" Asynchronous Error %08lx\n", aerr);
printk(" Asynchronous Vaddr %08lx\n", avaddr);
if (sun4c_memerr_reg)
printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
printk("REGISTER DUMP:\n");
show_regs(regs);
prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn));
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
struct pt_regs *regs)
{
if((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference\n");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %08lx\n", address);
}
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
unsigned long address)
{
struct pt_regs regs;
unsigned long g2;
unsigned int insn;
int i;
i = search_extables_range(ret_pc, &g2);
switch (i) {
case 3:
/* load & store will be handled by fixup */
return 3;
case 1:
/* store will be handled by fixup, load will bump out */
/* for _to_ macros */
insn = *((unsigned int *) pc);
if ((insn >> 21) & 1)
return 1;
break;
case 2:
/* load will be handled by fixup, store will bump out */
/* for _from_ macros */
insn = *((unsigned int *) pc);
if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
return 2;
break;
default:
break;
}
memset(&regs, 0, sizeof (regs));
regs.pc = pc;
regs.npc = pc + 4;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"nop\n\t"
"nop\n\t"
"nop\n" : "=r" (regs.psr));
unhandled_fault(address, current, &regs);
/* Not reached */
return 0;
}
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, sig))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->pc, (void *)regs->u_regs[UREG_I7],
(void *)regs->u_regs[UREG_FP], code);
print_vma_addr(KERN_CONT " in ", regs->pc);
printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
unsigned long addr)
{
siginfo_t info;
info.si_signo = sig;
info.si_code = code;
info.si_errno = 0;
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, info.si_code,
addr, current);
force_sig_info (sig, &info, current);
}
extern unsigned long safe_compute_effective_address(struct pt_regs *,
unsigned int);
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
unsigned int insn;
if (text_fault)
return regs->pc;
if (regs->psr & PSR_PS) {
insn = *(unsigned int *) regs->pc;
} else {
__get_user(insn, (unsigned int *) regs->pc);
}
return safe_compute_effective_address(regs, insn);
}
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
int text_fault)
{
unsigned long addr = compute_si_addr(regs, text_fault);
__do_fault_siginfo(code, sig, regs, addr);
}
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
unsigned int fixup;
unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
int fault, code;
if(text_fault)
address = regs->pc;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
code = SEGV_MAPERR;
if (!ARCH_SUN4C && address >= TASK_SIZE)
goto vmalloc_fault;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
down_read(&mm->mmap_sem);
/*
* The kernel referencing a bad kernel pointer can lock up
* a sun4c machine completely, so we must attempt recovery.
*/
if(!from_user && address >= PAGE_OFFSET)
goto bad_area;
vma = find_vma(mm, address);
if(!vma)
goto bad_area;
if(vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
code = SEGV_ACCERR;
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
/* Allow reads even for write-only mappings */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (from_user) {
do_fault_siginfo(code, SIGSEGV, regs, text_fault);
return;
}
/* Is this in ex_table? */
no_context:
g2 = regs->u_regs[UREG_G2];
if (!from_user) {
fixup = search_extables_range(regs->pc, &g2);
if (fixup > 10) { /* Values below are reserved for other things */
extern const unsigned __memset_start[];
extern const unsigned __memset_end[];
extern const unsigned __csum_partial_copy_start[];
extern const unsigned __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
regs->pc, fixup, g2);
#endif
if ((regs->pc >= (unsigned long)__memset_start &&
regs->pc < (unsigned long)__memset_end) ||
(regs->pc >= (unsigned long)__csum_partial_copy_start &&
regs->pc < (unsigned long)__csum_partial_copy_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}
regs->u_regs[UREG_G2] = g2;
regs->pc = fixup;
regs->npc = regs->pc + 4;
return;
}
}
unhandled_fault (address, tsk, regs);
do_exit(SIGKILL);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (from_user) {
pagefault_out_of_memory();
return;
}
goto no_context;
do_sigbus:
up_read(&mm->mmap_sem);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pmd_t *pmd, *pmd_k;
pgd = tsk->active_mm->pgd + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd)) {
if (!pgd_present(*pgd_k))
goto bad_area_nosemaphore;
pgd_val(*pgd) = pgd_val(*pgd_k);
return;
}
pmd = pmd_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address);
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore;
*pmd = *pmd_k;
return;
}
}
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
extern void sun4c_update_mmu_cache(struct vm_area_struct *,
unsigned long,pte_t *);
extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
pgd_t *pgdp;
pte_t *ptep;
if (text_fault) {
address = regs->pc;
} else if (!write &&
!(regs->psr & PSR_PS)) {
unsigned int insn, __user *ip;
ip = (unsigned int __user *)regs->pc;
if (!get_user(insn, ip)) {
if ((insn & 0xc1680000) == 0xc0680000)
write = 1;
}
}
if (!mm) {
/* We are oopsing. */
do_sparc_fault(regs, text_fault, write, address);
BUG(); /* P3 Oops already, you bitch */
}
pgdp = pgd_offset(mm, address);
ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
if (pgd_val(*pgdp)) {
if (write) {
if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_MODIFIED |
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
}
}
/* This conditional is 'interesting'. */
if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
&& (pte_val(*ptep) & _SUN4C_PAGE_VALID))
/* Note: It is safe to not grab the MMAP semaphore here because
* we know that update_mmu_cache() will not sleep for
* any reason (at least not in the current implementation)
* and therefore there is no danger of another thread getting
* on the CPU and doing a shrink_mmap() on this vma.
*/
sun4c_update_mmu_cache (find_vma(current->mm, address), address,
ptep);
else
do_sparc_fault(regs, text_fault, write, address);
}
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int code;
code = SEGV_MAPERR;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
}
up_read(&mm->mmap_sem);
return;
bad_area:
up_read(&mm->mmap_sem);
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
up_read(&mm->mmap_sem);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
static void check_stack_aligned(unsigned long sp)
{
if (sp & 0x7UL)
force_sig(SIGILL, current);
}
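/* The register-window handlers below fault in the 16-word (64-byte)
 * window save area at %sp. The window is written with doubleword
 * stores at offsets 0x00-0x38; since %sp is doubleword aligned,
 * comparing the pages of sp and sp + 0x38 is enough to tell
 * whether the save area straddles a page boundary, in which case
 * both pages must be faulted in.
 */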
void window_overflow_fault(void)
{
unsigned long sp;
sp = current_thread_info()->rwbuf_stkptrs[0];
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
unsigned long sp;
sp = regs->u_regs[UREG_FP];
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
check_stack_aligned(sp);
}
|
159,605,432,735,903 | /*
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/head.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
int show_unhandled_signals = 1;
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
preempt_enable();
}
return ret;
}
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
{
if ((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference\n");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %016lx\n", (unsigned long)address);
}
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
(tsk->mm ?
CTX_HWBITS(tsk->mm->context) :
CTX_HWBITS(tsk->active_mm->context)));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);
}
/*
* We now make sure that mmap_sem is held in all paths that call
* this. Additionally, to prevent kswapd from ripping ptes from
* under us, raise interrupts around the time that we look at the
pte; kswapd will then have to wait for its SMP IPI response from
* us. vmtruncate likewise. This saves us having to get pte lock.
*/
static unsigned int get_user_insn(unsigned long tpc)
{
pgd_t *pgdp = pgd_offset(current->mm, tpc);
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte;
unsigned long pa;
u32 insn = 0;
unsigned long pstate;
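/* Walk the user page tables by hand so the instruction word can
 * be fetched without risking a recursive fault; mmap_sem is held
 * by the caller (see the comment above).
 */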
if (pgd_none(*pgdp))
goto outret;
pudp = pud_offset(pgdp, tpc);
if (pud_none(*pudp))
goto outret;
pmdp = pmd_offset(pudp, tpc);
if (pmd_none(*pmdp))
goto outret;
/* This disables preemption for us as well. */
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
ptep = pte_offset_map(pmdp, tpc);
pte = *ptep;
if (!pte_present(pte))
goto out;
pa = (pte_pfn(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=r" (insn)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
out:
pte_unmap(ptep);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
return insn;
}
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, sig))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
(void *)regs->u_regs[UREG_FP], code);
print_vma_addr(KERN_CONT " in ", regs->tpc);
printk(KERN_CONT "\n");
}
extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
unsigned int insn, int fault_code)
{
unsigned long addr;
siginfo_t info;
info.si_code = code;
info.si_signo = sig;
info.si_errno = 0;
if (fault_code & FAULT_CODE_ITLB)
addr = regs->tpc;
else
addr = compute_effective_address(regs, insn, 0);
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, code, addr, current);
force_sig_info(sig, &info, current);
}
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
if (!insn) {
if (!regs->tpc || (regs->tpc & 0x3))
return 0;
if (regs->tstate & TSTATE_PRIV) {
insn = *(unsigned int *) regs->tpc;
} else {
insn = get_user_insn(regs->tpc);
}
}
return insn;
}
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
int fault_code, unsigned int insn,
unsigned long address)
{
unsigned char asi = ASI_P;
if ((!insn) && (regs->tstate & TSTATE_PRIV))
goto cannot_handle;
/* If the user insn could not be read (thus insn is zero), that
* is fine. We will just gun down the process with a signal
* in that case.
*/
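/* Format-3 memory ops have bits 31:30 set; bit 23 selects the
 * load/store-alternate forms. The ASI comes from TSTATE's %asi
 * field when the i bit (bit 13) is set, otherwise from the
 * immediate ASI field in bits 12:5. Values matching 0x82 under
 * the 0xf2 mask are the no-fault ASIs (0x82/0x83/0x8a/0x8b).
 */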
if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
(insn & 0xc0800000) == 0xc0800000) {
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82) {
if (insn & 0x1000000) {
handle_ldf_stq(insn, regs);
} else {
/* This was a non-faulting load. Just clear the
* destination register(s) and continue with the next
* instruction. -jj
*/
handle_ld_nf(insn, regs);
}
return;
}
}
/* Is this in ex_table? */
if (regs->tstate & TSTATE_PRIV) {
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
} else {
/* The si_code was set to make clear whether
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
*/
do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
return;
}
cannot_handle:
unhandled_fault (address, current, regs);
}
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
static int times;
if (times++ < 10)
printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
"64-bit TPC [%lx]\n",
current->comm, current->pid,
regs->tpc);
show_regs(regs);
}
static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
unsigned long addr)
{
static int times;
if (times++ < 10)
printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
"reports 64-bit fault address [%lx]\n",
current->comm, current->pid, addr);
show_regs(regs);
}
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
int si_code, fault_code, fault;
unsigned long address, mm_rss;
fault_code = get_thread_fault_code();
if (notify_page_fault(regs))
return;
si_code = SEGV_MAPERR;
address = current_thread_info()->fault_address;
if ((fault_code & FAULT_CODE_ITLB) &&
(fault_code & FAULT_CODE_DTLB))
BUG();
if (test_thread_flag(TIF_32BIT)) {
if (!(regs->tstate & TSTATE_PRIV)) {
if (unlikely((regs->tpc >> 32) != 0)) {
bogus_32bit_fault_tpc(regs);
goto intr_or_no_mm;
}
}
if (unlikely((address >> 32) != 0)) {
bogus_32bit_fault_address(regs, address);
goto intr_or_no_mm;
}
}
if (regs->tstate & TSTATE_PRIV) {
unsigned long tpc = regs->tpc;
/* Sanity check the PC. */
if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
(tpc >= MODULES_VADDR && tpc < MODULES_END)) {
/* Valid, no problems... */
} else {
bad_kernel_pc(regs, address);
return;
}
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
goto handle_kernel_fault;
}
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
/* Pure DTLB misses do not tell us whether the fault-causing
* load/store/atomic was a write or not; they only say that there
* was no match. So in such a case we (carefully) read the
* instruction to try and figure this out. It's an optimization
* so it's ok if we can't do this.
*
* Special hack, window spill/fill knows the exact fault type.
*/
if (((fault_code &
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
(vma->vm_flags & VM_WRITE) != 0) {
insn = get_fault_insn(regs, 0);
if (!insn)
goto continue_fault;
/* All loads, stores and atomics have bits 30 and 31 both set
* in the instruction. Bit 21 is set in all stores, but we
* have to avoid prefetches which also have bit 21 set.
*/
if ((insn & 0xc0200000) == 0xc0200000 &&
(insn & 0x01780000) != 0x01680000) {
/* Don't bother updating thread struct value,
* because update_mmu_cache only cares which tlb
* the access came from.
*/
fault_code |= FAULT_CODE_WRITE;
}
}
continue_fault:
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!(fault_code & FAULT_CODE_WRITE)) {
/* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn);
if ((insn & 0xc0800000) == 0xc0800000) {
unsigned char asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82)
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
/* If we took an ITLB miss on a non-executable page, catch
* that here.
*/
if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
BUG_ON(address != regs->tpc);
BUG_ON(regs->tstate & TSTATE_PRIV);
goto bad_area;
}
if (fault_code & FAULT_CODE_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* Spitfire has an icache which does not snoop
* processor stores. Later processors do...
*/
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
set_thread_fault_code(fault_code |
FAULT_CODE_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
return;
}
goto handle_kernel_fault;
intr_or_no_mm:
insn = get_fault_insn(regs, 0);
goto handle_kernel_fault;
do_sigbus:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
}
| /*
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/head.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
int show_unhandled_signals = 1;
static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
preempt_enable();
}
return ret;
}
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
{
if ((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
"pointer dereference\n");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %016lx\n", (unsigned long)address);
}
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
(tsk->mm ?
CTX_HWBITS(tsk->mm->context) :
CTX_HWBITS(tsk->active_mm->context)));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);
}
/*
* We now make sure that mmap_sem is held in all paths that call
* this. Additionally, to prevent kswapd from ripping ptes from
* under us, raise interrupts around the time that we look at the
pte; kswapd will then have to wait for its SMP IPI response from
* us. vmtruncate likewise. This saves us having to get pte lock.
*/
static unsigned int get_user_insn(unsigned long tpc)
{
pgd_t *pgdp = pgd_offset(current->mm, tpc);
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte;
unsigned long pa;
u32 insn = 0;
unsigned long pstate;
if (pgd_none(*pgdp))
goto outret;
pudp = pud_offset(pgdp, tpc);
if (pud_none(*pudp))
goto outret;
pmdp = pmd_offset(pudp, tpc);
if (pmd_none(*pmdp))
goto outret;
/* This disables preemption for us as well. */
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
ptep = pte_offset_map(pmdp, tpc);
pte = *ptep;
if (!pte_present(pte))
goto out;
pa = (pte_pfn(pte) << PAGE_SHIFT);
pa += (tpc & ~PAGE_MASK);
/* Use phys bypass so we don't pollute dtlb/dcache. */
__asm__ __volatile__("lduwa [%1] %2, %0"
: "=r" (insn)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
out:
pte_unmap(ptep);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
return insn;
}
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, sig))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
(void *)regs->u_regs[UREG_FP], code);
print_vma_addr(KERN_CONT " in ", regs->tpc);
printk(KERN_CONT "\n");
}
extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
unsigned int insn, int fault_code)
{
unsigned long addr;
siginfo_t info;
info.si_code = code;
info.si_signo = sig;
info.si_errno = 0;
if (fault_code & FAULT_CODE_ITLB)
addr = regs->tpc;
else
addr = compute_effective_address(regs, insn, 0);
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, sig, code, addr, current);
force_sig_info(sig, &info, current);
}
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
if (!insn) {
if (!regs->tpc || (regs->tpc & 0x3))
return 0;
if (regs->tstate & TSTATE_PRIV) {
insn = *(unsigned int *) regs->tpc;
} else {
insn = get_user_insn(regs->tpc);
}
}
return insn;
}
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
int fault_code, unsigned int insn,
unsigned long address)
{
unsigned char asi = ASI_P;
if ((!insn) && (regs->tstate & TSTATE_PRIV))
goto cannot_handle;
/* If the user insn could not be read (thus insn is zero), that
* is fine. We will just gun down the process with a signal
* in that case.
*/
if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
(insn & 0xc0800000) == 0xc0800000) {
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82) {
if (insn & 0x1000000) {
handle_ldf_stq(insn, regs);
} else {
/* This was a non-faulting load. Just clear the
* destination register(s) and continue with the next
* instruction. -jj
*/
handle_ld_nf(insn, regs);
}
return;
}
}
/* Is this in ex_table? */
if (regs->tstate & TSTATE_PRIV) {
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return;
}
} else {
/* The si_code was set to make clear whether
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
*/
do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
return;
}
cannot_handle:
unhandled_fault (address, current, regs);
}
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
static int times;
if (times++ < 10)
printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
"64-bit TPC [%lx]\n",
current->comm, current->pid,
regs->tpc);
show_regs(regs);
}
static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
unsigned long addr)
{
static int times;
if (times++ < 10)
printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
"reports 64-bit fault address [%lx]\n",
current->comm, current->pid, addr);
show_regs(regs);
}
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
int si_code, fault_code, fault;
unsigned long address, mm_rss;
fault_code = get_thread_fault_code();
if (notify_page_fault(regs))
return;
si_code = SEGV_MAPERR;
address = current_thread_info()->fault_address;
if ((fault_code & FAULT_CODE_ITLB) &&
(fault_code & FAULT_CODE_DTLB))
BUG();
if (test_thread_flag(TIF_32BIT)) {
if (!(regs->tstate & TSTATE_PRIV)) {
if (unlikely((regs->tpc >> 32) != 0)) {
bogus_32bit_fault_tpc(regs);
goto intr_or_no_mm;
}
}
if (unlikely((address >> 32) != 0)) {
bogus_32bit_fault_address(regs, address);
goto intr_or_no_mm;
}
}
if (regs->tstate & TSTATE_PRIV) {
unsigned long tpc = regs->tpc;
/* Sanity check the PC. */
if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
(tpc >= MODULES_VADDR && tpc < MODULES_END)) {
/* Valid, no problems... */
} else {
bad_kernel_pc(regs, address);
return;
}
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
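/* If the lock is contended and we faulted in privileged mode with
 * no exception fixup for this PC, that is a kernel bug and we may
 * well hold mmap_sem ourselves; don't risk sleeping on it, go
 * straight to the kernel fault path.
 */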
if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
goto handle_kernel_fault;
}
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
/* Pure DTLB misses do not tell us whether the fault-causing
* load/store/atomic was a write or not; they only say that there
* was no match. So in such a case we (carefully) read the
* instruction to try and figure this out. It's an optimization
* so it's ok if we can't do this.
*
* Special hack, window spill/fill knows the exact fault type.
*/
if (((fault_code &
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
(vma->vm_flags & VM_WRITE) != 0) {
insn = get_fault_insn(regs, 0);
if (!insn)
goto continue_fault;
/* All loads, stores and atomics have bits 30 and 31 both set
* in the instruction. Bit 21 is set in all stores, but we
* have to avoid prefetches which also have bit 21 set.
*/
if ((insn & 0xc0200000) == 0xc0200000 &&
(insn & 0x01780000) != 0x01680000) {
/* Don't bother updating thread struct value,
* because update_mmu_cache only cares which tlb
* the access came from.
*/
fault_code |= FAULT_CODE_WRITE;
}
}
continue_fault:
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!(fault_code & FAULT_CODE_WRITE)) {
/* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn);
if ((insn & 0xc0800000) == 0xc0800000) {
unsigned char asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82)
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
/* If we took an ITLB miss on a non-executable page, catch
* that here.
*/
if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
BUG_ON(address != regs->tpc);
BUG_ON(regs->tstate & TSTATE_PRIV);
goto bad_area;
}
if (fault_code & FAULT_CODE_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* Spitfire has an icache which does not snoop
* processor stores. Later processors do...
*/
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
set_thread_fault_code(fault_code |
FAULT_CODE_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
return;
}
goto handle_kernel_fault;
intr_or_no_mm:
insn = get_fault_insn(regs, 0);
goto handle_kernel_fault;
do_sigbus:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
}
|
48,574,509,426,791 | /*
* Performance events x86 architecture code
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
(unsigned long)(val)); \
native_write_msr((msr), (u32)((u64)(val)), \
(u32)((u64)(val) >> 32)); \
} while (0)
#endif
/*
* Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
*/
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
unsigned long offset, addr = (unsigned long)from;
unsigned long size, len = 0;
struct page *page;
void *map;
int ret;
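/* Pin one user page at a time with the IRQ-safe fast GUP, copy
 * out of a temporary atomic kernel mapping, then drop the page.
 * A failed pin ends the copy early; the number of bytes copied
 * so far is returned.
 */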
do {
ret = __get_user_pages_fast(addr, 1, 0, &page);
if (!ret)
break;
offset = addr & (PAGE_SIZE - 1);
size = min(PAGE_SIZE - offset, n - len);
map = kmap_atomic(page);
memcpy(to, map+offset, size);
kunmap_atomic(map);
put_page(page);
len += size;
to += size;
addr += size;
} while (len < n);
return len;
}
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
};
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
struct intel_percore;
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
/*
* Generic x86 PMC bits
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
/*
* Intel DebugStore bits
*/
struct debug_store *ds;
u64 pebs_enabled;
/*
* Intel LBR bits
*/
int lbr_users;
void *lbr_context;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* Intel percore register state.
* Coordinate shared resources between HT threads.
*/
int percore_used; /* Used by this CPU? */
struct intel_percore *per_core;
/*
* AMD specific bits
*/
struct amd_nb *amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
}
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
/*
* Constraint on the Event code.
*/
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
* Filter mask to validate fixed counter events.
* The following filters disqualify for fixed counters:
* - inv
* - edge
* - cnt-mask
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
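/* Fixed counters are assigned indices starting at
 * X86_PMC_IDX_FIXED (32), hence bit 32+n in the index mask.
 */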
/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
/*
* Extra registers for specific events.
* Some events need large masks and require external MSRs.
* Define a mapping to these extra registers.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
};
#define EVENT_EXTRA_REG(e, ms, m, vm) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
union perf_capabilities {
struct {
u64 lbr_format : 6;
u64 pebs_trap : 1;
u64 pebs_arch_reg : 1;
u64 pebs_format : 4;
u64 smm_freeze : 1;
};
u64 capabilities;
};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
/*
* Generic x86 PMC bits
*/
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
void (*hw_watchdog_set_attr)(struct perf_event_attr *attr);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
int max_events;
int num_counters;
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
struct event_constraint *percore_constraints;
void (*quirks)(void);
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
/*
* Intel Arch Perfmon v2+
*/
u64 intel_ctrl;
union perf_capabilities intel_cap;
/*
* Intel DebugStore bits
*/
int bts, pebs;
int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
/*
* Intel LBR
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
/*
* Extra registers for events
*/
struct extra_reg *extra_regs;
};
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
static int x86_perf_event_set_period(struct perf_event *event);
/*
* Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
* 'not supported', -1 means 'hw_event makes no sense on
* this CPU', any other value means the raw hw_event
* ID.
*/
#define C(x) PERF_COUNT_HW_CACHE_##x
static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
void hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
if (x86_pmu.hw_watchdog_set_attr)
x86_pmu.hw_watchdog_set_attr(wd_attr);
}
/*
* Propagate event elapsed time into the generic event.
* Can only be executed on the CPU where the event is active.
* Returns the current raw count.
*/
static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic event atomically:
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
rdmsrl(hwc->event_base, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
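/* Shifting up by (64 - cntval_bits) and arithmetic-shifting back
 * down sign-extends the raw counter values to 64 bits before the
 * subtraction.
 */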
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
static inline int x86_pmu_addr_offset(int index)
{
int offset;
/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
alternative_io(ASM_NOP2,
"shll $1, %%eax",
X86_FEATURE_PERFCTR_CORE,
"=a" (offset),
"a" (index));
return offset;
}
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
/*
* Find and validate any extra registers to set up.
*/
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
struct extra_reg *er;
event->hw.extra_reg = 0;
event->hw.extra_config = 0;
if (!x86_pmu.extra_regs)
return 0;
for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (config & er->config_mask))
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
event->hw.extra_reg = er->msr;
event->hw.extra_config = event->attr.config1;
break;
}
return 0;
}
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
static bool reserve_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
return true;
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu_config_addr(i));
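/* Every perfctr was reserved before the eventsel loop began, so
 * reset i to unwind all of them below.
 */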
i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
release_perfctr_nmi(x86_pmu_event_addr(i));
return false;
}
static void release_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu_event_addr(i));
release_evntsel_nmi(x86_pmu_config_addr(i));
}
}
#else
static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}
#endif
static bool check_hw_exists(void)
{
u64 val, val_new = 0;
int i, reg, ret = 0;
/*
* Check to see if the BIOS enabled any of the counters; if so,
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
goto bios_fail;
}
if (x86_pmu.num_counters_fixed) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
if (val & (0x03 << i*4))
goto bios_fail;
}
}
/*
* Now write a value and read it back to see if it matches,
* this is needed to detect certain hardware emulators (qemu/kvm)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
if (ret || val != val_new)
goto msr_fail;
return true;
bios_fail:
/*
* We still allow the PMU driver to operate:
*/
printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
return true;
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
return false;
}
static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_ds_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
static inline int x86_pmu_initialized(void)
{
return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
config = attr->config;
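/* attr->config packs the cache type, op and result codes into
 * its low three bytes.
 */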
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
if (val == 0)
return -ENOENT;
if (val == -1)
return -EINVAL;
hwc->config |= val;
attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
return x86_pmu_extra_regs(val, event);
}
static int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
u64 config;
if (!is_sampling_event(event)) {
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
} else {
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
* events (user-space has to fall back and
* sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
}
/*
* Do not allow config1 (extended registers) to propagate,
* there's no sane user-space generalization yet:
*/
if (attr->type == PERF_TYPE_RAW)
return 0;
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
config = x86_pmu.event_map(attr->config);
if (config == 0)
return -ENOENT;
if (config == -1LL)
return -EINVAL;
/*
* Branch tracing:
*/
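/* A fixed sample period of 1 (with frequency mode off) means
 * every branch is sampled, which is the behaviour BTS provides
 * in hardware.
 */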
if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
!attr->freq && hwc->sample_period == 1) {
/* BTS is not supported by this architecture. */
if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
hwc->config |= config;
return 0;
}
static int x86_pmu_hw_config(struct perf_event *event)
{
if (event->attr.precise_ip) {
int precise = 0;
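/* precise_ip level 1 needs PEBS; level 2 additionally needs LBR
 * so the skid in the reported IP can be fixed up.
 */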
/* Support for constant skid */
if (x86_pmu.pebs_active) {
precise++;
/* Support for IP fixup */
if (x86_pmu.lbr_nr)
precise++;
}
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
}
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
*/
event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
/*
* Count user and OS events unless requested not to
*/
if (!event->attr.exclude_user)
event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
if (!event->attr.exclude_kernel)
event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
return x86_setup_perfctr(event);
}
/*
* Setup the hardware configuration for a given attr_type
*/
static int __x86_pmu_event_init(struct perf_event *event)
{
int err;
if (!x86_pmu_initialized())
return -ENODEV;
err = 0;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
else
reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
event->destroy = hw_perf_event_destroy;
event->hw.idx = -1;
event->hw.last_cpu = -1;
event->hw.last_tag = ~0ULL;
return x86_pmu.hw_config(event);
}
static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
rdmsrl(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu_config_addr(idx), val);
}
}
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!x86_pmu_initialized())
return;
if (!cpuc->enabled)
return;
cpuc->n_added = 0;
cpuc->enabled = 0;
barrier();
x86_pmu.disable_all();
}
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
if (hwc->extra_reg)
wrmsrl(hwc->extra_reg, hwc->extra_config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
static struct pmu pmu;
static inline int is_x86_event(struct perf_event *event)
{
return event->pmu == &pmu;
}
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int i, j, w, wmax, num = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
for (i = 0; i < n; i++) {
c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
constraints[i] = c;
}
/*
* fastpath, try to reuse previous register
*/
for (i = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw;
c = constraints[i];
/* never assigned */
if (hwc->idx == -1)
break;
/* constraint still honored */
if (!test_bit(hwc->idx, c->idxmsk))
break;
/* not already used */
if (test_bit(hwc->idx, used_mask))
break;
__set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
}
if (i == n)
goto done;
/*
* begin slow path
*/
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
/*
* weight = number of possible counters
*
* 1 = most constrained, only works on one counter
* wmax = least constrained, works on any counter
*
* assign events to counters starting with most
* constrained events.
*/
wmax = x86_pmu.num_counters;
/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
if (x86_pmu.num_counters_fixed)
wmax++;
for (w = 1, num = n; num && w <= wmax; w++) {
/* for each event */
for (i = 0; num && i < n; i++) {
c = constraints[i];
hwc = &cpuc->event_list[i]->hw;
if (c->weight != w)
continue;
for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
if (!test_bit(j, used_mask))
break;
}
if (j == X86_PMC_IDX_MAX)
break;
__set_bit(j, used_mask);
if (assign)
assign[i] = j;
num--;
}
}
done:
/*
* scheduling failed or is just a simulation,
* free resources if necessary
*/
if (!assign || num) {
for (i = 0; i < n; i++) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
return num ? -ENOSPC : 0;
}
/*
* dogrp: true if we must collect sibling events (i.e. the whole group)
* returns total number of events and error code
*/
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
struct perf_event *event;
int n, max_count;
max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
if (is_x86_event(leader)) {
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = leader;
n++;
}
if (!dogrp)
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
if (!is_x86_event(event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = event;
n++;
}
return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
}
static inline int match_prev_assignment(struct hw_perf_event *hwc,
struct cpu_hw_events *cpuc,
int i)
{
return hwc->idx == cpuc->assign[i] &&
hwc->last_cpu == smp_processor_id() &&
hwc->last_tag == cpuc->tags[i];
}
static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);
static void x86_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
if (cpuc->enabled)
return;
if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
*
* step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/
for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
/*
* we can avoid reprogramming the counter if:
* - assigned same counter as last time
* - running on same CPU as last time
* - no other event has used the counter since
*/
if (hwc->idx == -1 ||
match_prev_assignment(hwc, cpuc, i))
continue;
/*
* Ensure we don't accidentally enable a stopped
* counter simply because we rescheduled.
*/
if (hwc->state & PERF_HES_STOPPED)
hwc->state |= PERF_HES_ARCH;
x86_pmu_stop(event, PERF_EF_UPDATE);
}
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
if (!match_prev_assignment(hwc, cpuc, i))
x86_assign_hw_event(event, cpuc, i);
else if (i < n_running)
continue;
if (hwc->state & PERF_HES_ARCH)
continue;
x86_pmu_start(event, PERF_EF_RELOAD);
}
cpuc->n_added = 0;
perf_events_lapic_init();
}
cpuc->enabled = 1;
barrier();
x86_pmu.enable_all(added);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the event disabled in hw:
*/
static int
x86_perf_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* If we are way outside a reasonable range then just skip forward:
*/
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Quirk: certain CPUs don't like it if just 1 hw_event is left:
*/
if (unlikely(left < 2))
left = 2;
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
* The hw event starts counting from this event offset,
* mark it to be able to extract future deltas:
*/
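/* Program the counter to -left: it counts upward and raises an
 * interrupt on overflow, i.e. after 'left' more events.
 */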
local64_set(&hwc->prev_count, (u64)-left);
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
/*
* Due to an erratum on certain CPUs we need
* a second write to be sure the register
* is updated properly
*/
if (x86_pmu.perfctr_second_write) {
wrmsrl(hwc->event_base,
(u64)(-left) & x86_pmu.cntval_mask);
}
perf_event_update_userpage(event);
return ret;
}
static void x86_pmu_enable_event(struct perf_event *event)
{
if (__this_cpu_read(cpu_hw_events.enabled))
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* Add a single event to the PMU.
*
* The event is added to the group of enabled events
* but only if it can be scheduled with existing events.
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc;
int assign[X86_PMC_IDX_MAX];
int n, n0, ret;
hwc = &event->hw;
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
goto out;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_ARCH;
/*
* If a group event scheduling transaction was started,
* skip the schedulability test here; it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto done_collect;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
goto out;
/*
* copy the new assignment now that we know it is possible;
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
done_collect:
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static void x86_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
x86_perf_event_set_period(event);
}
event->hw.state = 0;
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
}
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
u64 pebs;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_events, cpu);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
local_irq_restore(flags);
}
static void x86_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
/*
* Drain the remaining delta count out of an event
* that we are disabling:
*/
x86_perf_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
static void x86_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i;
/*
* If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
return;
x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
while (++i < cpuc->n_events)
cpuc->event_list[i-1] = cpuc->event_list[i];
--cpuc->n_events;
break;
}
}
perf_event_update_userpage(event);
}
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This generic handler doesn't seem to have any issues where the
* unmasking occurs so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter, some CPUs
* might still deliver spurious interrupts that are
* in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
val = x86_perf_event_update(event);
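/* Counters run upward from a negative start value; while the
 * sign bit of the sign-extended count is still set, the counter
 * has not yet wrapped and there is no overflow to handle.
 */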
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
* event overflow
*/
handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled;
}
void perf_events_lapic_init(void)
{
if (!x86_pmu.apic || !x86_pmu_initialized())
return;
/*
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
struct pmu_nmi_state {
unsigned int marked;
int handled;
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;
static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
case CPU_STARTING:
if (x86_pmu.cpu_starting)
x86_pmu.cpu_starting(cpu);
break;
case CPU_DYING:
if (x86_pmu.cpu_dying)
x86_pmu.cpu_dying(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
break;
default:
break;
}
return ret;
}
static void __init pmu_check_apic(void)
{
if (cpu_has_apic)
return;
x86_pmu.apic = 0;
pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
pr_info("no hardware sampling interrupt available.\n");
}
static int __init init_hw_perf_events(void)
{
struct event_constraint *c;
int err;
pr_info("Performance Events: ");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
err = intel_pmu_init();
break;
case X86_VENDOR_AMD:
err = amd_pmu_init();
break;
default:
return 0;
}
if (err != 0) {
pr_cont("no PMU driver, software events only.\n");
return 0;
}
pmu_check_apic();
/* sanity check that the hardware exists or is emulated */
if (!check_hw_exists())
return 0;
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.quirks)
x86_pmu.quirks();
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
x86_pmu.intel_ctrl |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters);
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != X86_RAW_EVENT_MASK)
continue;
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
}
}
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(x86_pmu_notifier);
return 0;
}
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
static void x86_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
__this_cpu_write(cpu_hw_events.n_txn, 0);
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/*
* Truncate the collected events.
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
int n, ret;
n = cpuc->n_events;
if (!x86_pmu_initialized())
return -EAGAIN;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
* copy the new assignment now that we know it is possible;
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* validate that we can schedule this event
*/
static int validate_event(struct perf_event *event)
{
struct cpu_hw_events *fake_cpuc;
struct event_constraint *c;
int ret = 0;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
return -ENOMEM;
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
ret = -ENOSPC;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
kfree(fake_cpuc);
return ret;
}
/*
* validate a single event group
*
* validation includes:
* - check events are compatible with each other
* - events do not compete for the same counter
* - number of events <= number of counters
*
* validation ensures the group can be loaded onto the
* PMU if it was the only group available.
*/
static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
int ret, n;
ret = -ENOMEM;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
goto out;
/*
* the event is not yet connected with its
* siblings, therefore we must first collect the
* existing siblings, then add the new event
* before we can simulate the scheduling
*/
ret = -ENOSPC;
n = collect_events(fake_cpuc, leader, true);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
n = collect_events(fake_cpuc, event, false);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
out:
return ret;
}
static int x86_pmu_event_init(struct perf_event *event)
{
struct pmu *tmp;
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
err = __x86_pmu_event_init(event);
if (!err) {
/*
* we temporarily connect event to its pmu
* such that validate_group() can classify
* it as an x86 event using is_x86_event()
*/
tmp = event->pmu;
event->pmu = &pmu;
if (event->group_leader != event)
err = validate_group(event);
else
err = validate_event(event);
event->pmu = tmp;
}
if (err) {
if (event->destroy)
event->destroy(event);
}
return err;
}
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
.event_init = x86_pmu_event_init,
.add = x86_pmu_add,
.del = x86_pmu_del,
.start = x86_pmu_start,
.stop = x86_pmu_stop,
.read = x86_pmu_read,
.start_txn = x86_pmu_start_txn,
.cancel_txn = x86_pmu_cancel_txn,
.commit_txn = x86_pmu_commit_txn,
};
/*
* callchain support
*/
static int backtrace_stack(void *data, char *name)
{
return 0;
}
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
.stack = backtrace_stack,
.address = backtrace_address,
.walk_stack = print_context_stack_bp,
};
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
fp = compat_ptr(regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if (fp < compat_ptr(regs->sp))
break;
perf_callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame);
}
return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
return 0;
}
#endif
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry))
return;
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if ((unsigned long)fp < regs->sp)
break;
perf_callchain_store(entry, frame.return_address);
fp = frame.next_frame;
}
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
ip = perf_guest_cbs->get_guest_ip();
else
ip = instruction_pointer(regs);
return ip;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
if (regs->flags & PERF_EFLAGS_EXACT)
misc |= PERF_RECORD_MISC_EXACT_IP;
return misc;
}
| /*
* Performance events x86 architecture code
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
* For licencing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
(unsigned long)(val)); \
native_write_msr((msr), (u32)((u64)(val)), \
(u32)((u64)(val) >> 32)); \
} while (0)
#endif
/*
* best effort, GUP based copy_from_user() that assumes IRQ or NMI context
*/
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
unsigned long offset, addr = (unsigned long)from;
unsigned long size, len = 0;
struct page *page;
void *map;
int ret;
do {
ret = __get_user_pages_fast(addr, 1, 0, &page);
if (!ret)
break;
offset = addr & (PAGE_SIZE - 1);
size = min(PAGE_SIZE - offset, n - len);
map = kmap_atomic(page);
memcpy(to, map+offset, size);
kunmap_atomic(map);
put_page(page);
len += size;
to += size;
addr += size;
} while (len < n);
return len;
}
struct event_constraint {
union {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
u64 code;
u64 cmask;
int weight;
};
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};
struct intel_percore;
#define MAX_LBR_ENTRIES 16
struct cpu_hw_events {
/*
* Generic x86 PMC bits
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
int n_added;
int n_txn;
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
unsigned int group_flag;
/*
* Intel DebugStore bits
*/
struct debug_store *ds;
u64 pebs_enabled;
/*
* Intel LBR bits
*/
int lbr_users;
void *lbr_context;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
/*
* Intel percore register state.
* Coordinate shared resources between HT threads.
*/
int percore_used; /* Used by this CPU? */
struct intel_percore *per_core;
/*
* AMD specific bits
*/
struct amd_nb *amd_nb;
};
#define __EVENT_CONSTRAINT(c, n, m, w) {\
{ .idxmsk64 = (n) }, \
.code = (c), \
.cmask = (m), \
.weight = (w), \
}
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
/*
* Constraint on the Event code.
*/
#define INTEL_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
* the following filters disqualify for fixed counters:
* - inv
* - edge
* - cnt-mask
* The other filters are supported by fixed counters.
* The any-thread option is supported starting with v3.
*/
#define FIXED_EVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)
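/*
 * Worked example of the constraint encoding: INTEL_EVENT_CONSTRAINT(0x12, 0x2)
 * yields code 0x12, cmask ARCH_PERFMON_EVENTSEL_EVENT and idxmsk64 0x2, i.e.
 * the event may only be scheduled on generic counter 1 (weight 1), while
 * FIXED_EVENT_CONSTRAINT(0x00c0, 0) sets bit 32 + 0 of idxmsk64, which is
 * fixed counter 0 (X86_PMC_IDX_FIXED + 0).
 */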
/*
* Constraint on the Event code + UMask
*/
#define INTEL_UEVENT_CONSTRAINT(c, n) \
EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
#define EVENT_CONSTRAINT_END \
EVENT_CONSTRAINT(0, 0, 0)
#define for_each_event_constraint(e, c) \
for ((e) = (c); (e)->weight; (e)++)
/*
* Extra registers for specific events.
* Some events need large masks and require external MSRs.
* Define a mapping to these extra registers.
*/
struct extra_reg {
unsigned int event;
unsigned int msr;
u64 config_mask;
u64 valid_mask;
};
#define EVENT_EXTRA_REG(e, ms, m, vm) { \
.event = (e), \
.msr = (ms), \
.config_mask = (m), \
.valid_mask = (vm), \
}
#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \
EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm)
#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0)
union perf_capabilities {
struct {
u64 lbr_format : 6;
u64 pebs_trap : 1;
u64 pebs_arch_reg : 1;
u64 pebs_format : 4;
u64 smm_freeze : 1;
};
u64 capabilities;
};
/*
* struct x86_pmu - generic x86 pmu
*/
struct x86_pmu {
/*
* Generic x86 PMC bits
*/
const char *name;
int version;
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(int added);
void (*enable)(struct perf_event *);
void (*disable)(struct perf_event *);
void (*hw_watchdog_set_attr)(struct perf_event_attr *attr);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
int max_events;
int num_counters;
int num_counters_fixed;
int cntval_bits;
u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
(*get_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event);
struct event_constraint *event_constraints;
struct event_constraint *percore_constraints;
void (*quirks)(void);
int perfctr_second_write;
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
/*
* Intel Arch Perfmon v2+
*/
u64 intel_ctrl;
union perf_capabilities intel_cap;
/*
* Intel DebugStore bits
*/
int bts, pebs;
int bts_active, pebs_active;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
/*
* Intel LBR
*/
unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
int lbr_nr; /* hardware stack size */
/*
* Extra registers for events
*/
struct extra_reg *extra_regs;
};
static struct x86_pmu x86_pmu __read_mostly;
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
static int x86_perf_event_set_period(struct perf_event *event);
/*
* Generalized hw caching related hw_event table, filled
* in on a per model basis. A value of 0 means
* 'not supported', -1 means 'hw_event makes no sense on
* this CPU', any other value means the raw hw_event
* ID.
*/
#define C(x) PERF_COUNT_HW_CACHE_##x
static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64 __read_mostly hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
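/*
 * Example lookup: hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)] is
 * filled in by the model-specific init code; on Westmere it is 0x0151
 * (L1D.REPL), which set_ext_hw_attr() then ORs into hwc->config.
 */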
void hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
if (x86_pmu.hw_watchdog_set_attr)
x86_pmu.hw_watchdog_set_attr(wd_attr);
}
/*
* Propagate event elapsed time into the generic event.
* Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic event atomically:
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
rdmsrl(hwc->event_base, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
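/*
 * Worked example of the shift trick, assuming a 48-bit counter (shift == 16):
 * if prev_raw_count was 0xffffffffffff and the counter has since wrapped to
 * 0x5, then (new << 16) - (prev << 16) is 0x50000 - (s64)0xffffffffffff0000
 * = 0x60000, and the arithmetic shift right by 16 gives a delta of 6, the
 * correct count across the wrap.
 */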
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
static inline int x86_pmu_addr_offset(int index)
{
int offset;
/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
alternative_io(ASM_NOP2,
"shll $1, %%eax",
X86_FEATURE_PERFCTR_CORE,
"=a" (offset),
"a" (index));
return offset;
}
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + x86_pmu_addr_offset(index);
}
static inline unsigned int x86_pmu_event_addr(int index)
{
return x86_pmu.perfctr + x86_pmu_addr_offset(index);
}
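/*
 * Example of the offset calculation: for index 2 the legacy layout gives
 * eventsel + 2 / perfctr + 2, while CPUs with X86_FEATURE_PERFCTR_CORE
 * interleave the eventsel/perfctr MSRs, so the index is doubled and the
 * same counter lives at eventsel + 4 / perfctr + 4.
 */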
/*
* Find and validate any extra registers to set up.
*/
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
struct extra_reg *er;
event->hw.extra_reg = 0;
event->hw.extra_config = 0;
if (!x86_pmu.extra_regs)
return 0;
for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (config & er->config_mask))
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
event->hw.extra_reg = er->msr;
event->hw.extra_config = event->attr.config1;
break;
}
return 0;
}
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
static bool reserve_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
return true;
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu_config_addr(i));
i = x86_pmu.num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
release_perfctr_nmi(x86_pmu_event_addr(i));
return false;
}
static void release_pmc_hardware(void)
{
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu_event_addr(i));
release_evntsel_nmi(x86_pmu_config_addr(i));
}
}
#else
static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}
#endif
static bool check_hw_exists(void)
{
u64 val, val_new = 0;
int i, reg, ret = 0;
/*
* Check to see if the BIOS enabled any of the counters, if so
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
goto bios_fail;
}
if (x86_pmu.num_counters_fixed) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
if (val & (0x03 << i*4))
goto bios_fail;
}
}
/*
* Now write a value and read it back to see if it matches,
* this is needed to detect certain hardware emulators (qemu/kvm)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
if (ret || val != val_new)
goto msr_fail;
return true;
bios_fail:
/*
* We still allow the PMU driver to operate:
*/
printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
return true;
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
return false;
}
static void reserve_ds_buffers(void);
static void release_ds_buffers(void);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_ds_buffers();
mutex_unlock(&pmc_reserve_mutex);
}
}
static inline int x86_pmu_initialized(void)
{
return x86_pmu.handle_irq != NULL;
}
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
config = attr->config;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
if (val == 0)
return -ENOENT;
if (val == -1)
return -EINVAL;
hwc->config |= val;
attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
return x86_pmu_extra_regs(val, event);
}
static int x86_setup_perfctr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
u64 config;
if (!is_sampling_event(event)) {
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
} else {
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
* events (user-space has to fall back and
* sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
}
/*
* Do not allow config1 (extended registers) to propagate,
* there's no sane user-space generalization yet:
*/
if (attr->type == PERF_TYPE_RAW)
return 0;
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
config = x86_pmu.event_map(attr->config);
if (config == 0)
return -ENOENT;
if (config == -1LL)
return -EINVAL;
/*
* Branch tracing:
*/
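/*
 * The condition below identifies a BTS candidate: the generic
 * branch-instructions event sampled with a fixed period of 1 and no
 * frequency-based sampling.
 */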
if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
!attr->freq && hwc->sample_period == 1) {
/* BTS is not supported by this architecture. */
if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
/* BTS is currently only allowed for user-mode. */
if (!attr->exclude_kernel)
return -EOPNOTSUPP;
}
hwc->config |= config;
return 0;
}
static int x86_pmu_hw_config(struct perf_event *event)
{
if (event->attr.precise_ip) {
int precise = 0;
/* Support for constant skid */
if (x86_pmu.pebs_active) {
precise++;
/* Support for IP fixup */
if (x86_pmu.lbr_nr)
precise++;
}
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
}
/*
* Generate PMC IRQs:
* (keep 'enabled' bit clear for now)
*/
event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
/*
* Count user and OS events unless requested not to
*/
if (!event->attr.exclude_user)
event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
if (!event->attr.exclude_kernel)
event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
if (event->attr.type == PERF_TYPE_RAW)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
return x86_setup_perfctr(event);
}
/*
* Setup the hardware configuration for a given attr_type
*/
static int __x86_pmu_event_init(struct perf_event *event)
{
int err;
if (!x86_pmu_initialized())
return -ENODEV;
err = 0;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
else
reserve_ds_buffers();
}
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
event->destroy = hw_perf_event_destroy;
event->hw.idx = -1;
event->hw.last_cpu = -1;
event->hw.last_tag = ~0ULL;
return x86_pmu.hw_config(event);
}
static void x86_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
rdmsrl(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
wrmsrl(x86_pmu_config_addr(idx), val);
}
}
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!x86_pmu_initialized())
return;
if (!cpuc->enabled)
return;
cpuc->n_added = 0;
cpuc->enabled = 0;
barrier();
x86_pmu.disable_all();
}
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
u64 enable_mask)
{
if (hwc->extra_reg)
wrmsrl(hwc->extra_reg, hwc->extra_config);
wrmsrl(hwc->config_base, hwc->config | enable_mask);
}
static void x86_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
if (!test_bit(idx, cpuc->active_mask))
continue;
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
static struct pmu pmu;
static inline int is_x86_event(struct perf_event *event)
{
return event->pmu == &pmu;
}
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int i, j, w, wmax, num = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
for (i = 0; i < n; i++) {
c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
constraints[i] = c;
}
/*
* fastpath, try to reuse previous register
*/
for (i = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw;
c = constraints[i];
/* never assigned */
if (hwc->idx == -1)
break;
/* constraint still honored */
if (!test_bit(hwc->idx, c->idxmsk))
break;
/* not already used */
if (test_bit(hwc->idx, used_mask))
break;
__set_bit(hwc->idx, used_mask);
if (assign)
assign[i] = hwc->idx;
}
if (i == n)
goto done;
/*
* begin slow path
*/
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
/*
* weight = number of possible counters
*
* 1 = most constrained, only works on one counter
* wmax = least constrained, works on any counter
*
* assign events to counters starting with most
* constrained events.
*/
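/*
 * Illustrative example of the weight-ordered passes below: with four generic
 * counters and events whose idxmsk values are 0x2 (weight 1), 0x3 (weight 2)
 * and 0xf (weight 4), the w == 1 pass puts the first event on counter 1, the
 * w == 2 pass puts the second on counter 0, and the w == 4 pass puts the
 * effectively unconstrained event on counter 2.
 */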
wmax = x86_pmu.num_counters;
/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
if (x86_pmu.num_counters_fixed)
wmax++;
for (w = 1, num = n; num && w <= wmax; w++) {
/* for each event */
for (i = 0; num && i < n; i++) {
c = constraints[i];
hwc = &cpuc->event_list[i]->hw;
if (c->weight != w)
continue;
for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
if (!test_bit(j, used_mask))
break;
}
if (j == X86_PMC_IDX_MAX)
break;
__set_bit(j, used_mask);
if (assign)
assign[i] = j;
num--;
}
}
done:
/*
* scheduling failed or is just a simulation,
* free resources if necessary
*/
if (!assign || num) {
for (i = 0; i < n; i++) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
return num ? -ENOSPC : 0;
}
/*
* dogrp: true if must collect siblings events (group)
* returns total number of events and error code
*/
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
struct perf_event *event;
int n, max_count;
max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
if (is_x86_event(leader)) {
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = leader;
n++;
}
if (!dogrp)
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
if (!is_x86_event(event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
if (n >= max_count)
return -ENOSPC;
cpuc->event_list[n] = event;
n++;
}
return n;
}
static inline void x86_assign_hw_event(struct perf_event *event,
struct cpu_hw_events *cpuc, int i)
{
struct hw_perf_event *hwc = &event->hw;
hwc->idx = cpuc->assign[i];
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
}
}
static inline int match_prev_assignment(struct hw_perf_event *hwc,
struct cpu_hw_events *cpuc,
int i)
{
return hwc->idx == cpuc->assign[i] &&
hwc->last_cpu == smp_processor_id() &&
hwc->last_tag == cpuc->tags[i];
}
static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);
static void x86_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
if (cpuc->enabled)
return;
if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
*
* step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/
for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
/*
* we can avoid reprogramming counter if:
* - assigned same counter as last time
* - running on same CPU as last time
* - no other event has used the counter since
*/
if (hwc->idx == -1 ||
match_prev_assignment(hwc, cpuc, i))
continue;
/*
* Ensure we don't accidentally enable a stopped
* counter simply because we rescheduled.
*/
if (hwc->state & PERF_HES_STOPPED)
hwc->state |= PERF_HES_ARCH;
x86_pmu_stop(event, PERF_EF_UPDATE);
}
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
if (!match_prev_assignment(hwc, cpuc, i))
x86_assign_hw_event(event, cpuc, i);
else if (i < n_running)
continue;
if (hwc->state & PERF_HES_ARCH)
continue;
x86_pmu_start(event, PERF_EF_RELOAD);
}
cpuc->n_added = 0;
perf_events_lapic_init();
}
cpuc->enabled = 1;
barrier();
x86_pmu.enable_all(added);
}
static inline void x86_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
wrmsrl(hwc->config_base, hwc->config);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
* To be called with the event disabled in hw:
*/
static int
x86_perf_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
if (idx == X86_PMC_IDX_FIXED_BTS)
return 0;
/*
* If we are way outside a reasonable range then just skip forward:
*/
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Quirk: certain CPUs don't like it if just 1 hw_event is left:
*/
if (unlikely(left < 2))
left = 2;
if (left > x86_pmu.max_period)
left = x86_pmu.max_period;
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
* The hw event starts counting from this event offset,
* mark it to be able to extract future deltas:
*/
local64_set(&hwc->prev_count, (u64)-left);
wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
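/*
 * Example, assuming left == 100000 and a 48-bit counter: the MSR is loaded
 * with 2^48 - 100000, so the counter overflows and raises the PMI after
 * 100000 increments, while prev_count holds (u64)-100000 so the next
 * x86_perf_event_update() computes the correct delta.
 */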
/*
* Due to an erratum on certain CPUs we need
* a second write to be sure the register
* is updated properly
*/
if (x86_pmu.perfctr_second_write) {
wrmsrl(hwc->event_base,
(u64)(-left) & x86_pmu.cntval_mask);
}
perf_event_update_userpage(event);
return ret;
}
static void x86_pmu_enable_event(struct perf_event *event)
{
if (__this_cpu_read(cpu_hw_events.enabled))
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* Add a single event to the PMU.
*
* The event is added to the group of enabled events
* but only if it can be scheduled with existing events.
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc;
int assign[X86_PMC_IDX_MAX];
int n, n0, ret;
hwc = &event->hw;
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
ret = n = collect_events(cpuc, event, false);
if (ret < 0)
goto out;
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be performed
* at commit time (->commit_txn) as a whole
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
goto done_collect;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
goto out;
/*
* copy the new assignment now that we know it is possible;
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
done_collect:
cpuc->n_events = n;
cpuc->n_added += n - n0;
cpuc->n_txn += n - n0;
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static void x86_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
x86_perf_event_set_period(event);
}
event->hw.state = 0;
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
}
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
u64 pebs;
struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_events, cpu);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("\n");
pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
prev_left = per_cpu(pmc_prev_left[idx], cpu);
pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
cpu, idx, pmc_ctrl);
pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
local_irq_restore(flags);
}
static void x86_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
/*
* Drain the remaining delta count out of an event
* that we are disabling:
*/
x86_perf_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
}
static void x86_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i;
/*
* If we're called during a txn, we don't need to do anything.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
*/
if (cpuc->group_flag & PERF_EVENT_TXN)
return;
x86_pmu_stop(event, PERF_EF_UPDATE);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
while (++i < cpuc->n_events)
cpuc->event_list[i-1] = cpuc->event_list[i];
--cpuc->n_events;
break;
}
}
perf_event_update_userpage(event);
}
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This generic handler doesn't seem to have any issues where the
* unmasking occurs so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter, some CPUs
* might still deliver spurious interrupts that are
* still in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
val = x86_perf_event_update(event);
if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;
/*
* event overflow
*/
handled++;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
return handled;
}
void perf_events_lapic_init(void)
{
if (!x86_pmu.apic || !x86_pmu_initialized())
return;
/*
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
struct pmu_nmi_state {
unsigned int marked;
int handled;
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
}
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};
static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;
static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &unconstrained;
}
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
ret = x86_pmu.cpu_prepare(cpu);
break;
case CPU_STARTING:
if (x86_pmu.cpu_starting)
x86_pmu.cpu_starting(cpu);
break;
case CPU_DYING:
if (x86_pmu.cpu_dying)
x86_pmu.cpu_dying(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
break;
default:
break;
}
return ret;
}
static void __init pmu_check_apic(void)
{
if (cpu_has_apic)
return;
x86_pmu.apic = 0;
pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
pr_info("no hardware sampling interrupt available.\n");
}
static int __init init_hw_perf_events(void)
{
struct event_constraint *c;
int err;
pr_info("Performance Events: ");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
err = intel_pmu_init();
break;
case X86_VENDOR_AMD:
err = amd_pmu_init();
break;
default:
return 0;
}
if (err != 0) {
pr_cont("no PMU driver, software events only.\n");
return 0;
}
pmu_check_apic();
/* sanity check that the hardware exists or is emulated */
if (!check_hw_exists())
return 0;
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.quirks)
x86_pmu.quirks();
if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}
x86_pmu.intel_ctrl |=
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters);
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != X86_RAW_EVENT_MASK)
continue;
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
}
}
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(x86_pmu_notifier);
return 0;
}
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
{
x86_perf_event_update(event);
}
/*
* Start group events scheduling transaction
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
static void x86_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
__this_cpu_write(cpu_hw_events.n_txn, 0);
}
/*
* Stop group events scheduling transaction
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/*
* Truncate the collected events.
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
perf_pmu_enable(pmu);
}
/*
* Commit group events scheduling transaction
* Perform the group schedulability test as a whole
* Return 0 if success
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
int n, ret;
n = cpuc->n_events;
if (!x86_pmu_initialized())
return -EAGAIN;
ret = x86_pmu.schedule_events(cpuc, n, assign);
if (ret)
return ret;
/*
* copy the new assignment now that we know it is possible;
* it will be used by hw_perf_enable()
*/
memcpy(cpuc->assign, assign, n*sizeof(int));
cpuc->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
return 0;
}
/*
* validate that we can schedule this event
*/
static int validate_event(struct perf_event *event)
{
struct cpu_hw_events *fake_cpuc;
struct event_constraint *c;
int ret = 0;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
return -ENOMEM;
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
ret = -ENOSPC;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
kfree(fake_cpuc);
return ret;
}
/*
* validate a single event group
*
* validation includes:
* - check events are compatible with each other
* - events do not compete for the same counter
* - number of events <= number of counters
*
* validation ensures the group can be loaded onto the
* PMU if it was the only group available.
*/
static int validate_group(struct perf_event *event)
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
int ret, n;
ret = -ENOMEM;
fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
if (!fake_cpuc)
goto out;
/*
* the event is not yet connected with its
* siblings, therefore we must first collect the
* existing siblings, then add the new event
* before we can simulate the scheduling
*/
ret = -ENOSPC;
n = collect_events(fake_cpuc, leader, true);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
n = collect_events(fake_cpuc, event, false);
if (n < 0)
goto out_free;
fake_cpuc->n_events = n;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out_free:
kfree(fake_cpuc);
out:
return ret;
}
static int x86_pmu_event_init(struct perf_event *event)
{
struct pmu *tmp;
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
err = __x86_pmu_event_init(event);
if (!err) {
/*
* we temporarily connect event to its pmu
* such that validate_group() can classify
* it as an x86 event using is_x86_event()
*/
tmp = event->pmu;
event->pmu = &pmu;
if (event->group_leader != event)
err = validate_group(event);
else
err = validate_event(event);
event->pmu = tmp;
}
if (err) {
if (event->destroy)
event->destroy(event);
}
return err;
}
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
.event_init = x86_pmu_event_init,
.add = x86_pmu_add,
.del = x86_pmu_del,
.start = x86_pmu_start,
.stop = x86_pmu_stop,
.read = x86_pmu_read,
.start_txn = x86_pmu_start_txn,
.cancel_txn = x86_pmu_cancel_txn,
.commit_txn = x86_pmu_commit_txn,
};
/*
* callchain support
*/
static int backtrace_stack(void *data, char *name)
{
return 0;
}
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops backtrace_ops = {
.stack = backtrace_stack,
.address = backtrace_address,
.walk_stack = print_context_stack_bp,
};
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
fp = compat_ptr(regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if (fp < compat_ptr(regs->sp))
break;
perf_callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame);
}
return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
return 0;
}
#endif
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry))
return;
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if ((unsigned long)fp < regs->sp)
break;
perf_callchain_store(entry, frame.return_address);
fp = frame.next_frame;
}
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
ip = perf_guest_cbs->get_guest_ip();
else
ip = instruction_pointer(regs);
return ip;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
if (regs->flags & PERF_EFLAGS_EXACT)
misc |= PERF_RECORD_MISC_EXACT_IP;
return misc;
}
|
161,028,940,756,984 | #ifdef CONFIG_CPU_SUP_INTEL
#define MAX_EXTRA_REGS 2
/*
* Per register state.
*/
struct er_account {
int ref; /* reference count */
unsigned int extra_reg; /* extra MSR number */
u64 extra_config; /* extra MSR config */
};
/*
* Per core state
* This is used to coordinate shared registers between HT threads.
*/
struct intel_percore {
raw_spinlock_t lock; /* protect structure */
struct er_account regs[MAX_EXTRA_REGS];
int refcnt; /* number of threads */
unsigned core_id;
};
/*
* Intel PerfMon, used on Core and later.
*/
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};
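/*
 * The values above are (umask << 8) | event_select; e.g. 0x412e for
 * PERF_COUNT_HW_CACHE_MISSES is architectural event 0x2e with umask 0x41
 * (LONGEST_LAT_CACHE.MISS).
 */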
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/*
* Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
* 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
* ratio between these counters.
*/
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
EVENT_EXTRA_END
};
static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0xb7, 0),
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_percore_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0xb7, 0),
INTEL_EVENT_CONSTRAINT(0xbb, 0),
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
/*
* Use RFO, not WRITEBACK, because a write miss would typically occur
* on RFO.
*/
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
/*
* Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
* See IA32 SDM Vol 3B 30.6.1.3
*/
#define NHM_DMND_DATA_RD (1 << 0)
#define NHM_DMND_RFO (1 << 1)
#define NHM_DMND_IFETCH (1 << 2)
#define NHM_DMND_WB (1 << 3)
#define NHM_PF_DATA_RD (1 << 4)
#define NHM_PF_DATA_RFO (1 << 5)
#define NHM_PF_IFETCH (1 << 6)
#define NHM_OFFCORE_OTHER (1 << 7)
#define NHM_UNCORE_HIT (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
#define NHM_OTHER_CORE_HITM (1 << 10)
/* reserved */
#define NHM_REMOTE_CACHE_FWD (1 << 12)
#define NHM_REMOTE_DRAM (1 << 13)
#define NHM_LOCAL_DRAM (1 << 14)
#define NHM_NON_DRAM (1 << 15)
#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
#define NHM_DMND_READ (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
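/*
 * Note: an MSR_OFFCORE_RESPONSE value is a request-type mask (bits 0-7)
 * OR'ed with a response-type mask (bits 8-15); the extra-regs table below
 * composes the LL access/miss encodings from the helpers above.
 */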
static __initconst const u64 nehalem_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
},
}
};
static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
/*
* Use RFO, not WRITEBACK, because a write miss would typically occur
* on RFO.
*/
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
intel_pmu_pebs_disable_all();
intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
cpuc->events[X86_PMC_IDX_FIXED_BTS];
if (WARN_ON_ONCE(!event))
return;
intel_pmu_enable_bts(event->hw.config);
}
}
/*
* Workaround for:
* Intel Errata AAK100 (model 26)
* Intel Errata AAP53 (model 30)
* Intel Errata BD53 (model 44)
*
* The official story:
* These chips need to be 'reset' when adding counters by programming the
* magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
* in sequence on the same PMC or on different PMCs.
*
* In practice it appears some of these events do in fact count, and
* we need to program all 4 events.
*/
static void intel_pmu_nhm_workaround(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
static const unsigned long nhm_magic[4] = {
0x4300B5,
0x4300D2,
0x4300B1,
0x4300B1
};
struct perf_event *event;
int i;
/*
* The erratum requires the following steps:
* 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
* 2) Configure 4 PERFEVTSELx with the magic events and clear
* the corresponding PMCx;
* 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
* 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
* 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
*/
/*
* The real steps we take are a little different from the above:
* A) To reduce MSR operations, we skip step 1) since those MSRs
* are already cleared before this function is called;
* B) Call x86_perf_event_update to save PMCx before configuring
* PERFEVTSELx with the magic numbers;
* C) For step 5), we clear a PERFEVTSELx only when it is not
* currently in use;
* D) Call x86_perf_event_set_period to restore PMCx.
*/
/* We always operate 4 pairs of PERF Counters */
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
if (event)
x86_perf_event_update(event);
}
for (i = 0; i < 4; i++) {
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
}
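/*
 * Briefly enable the four general-purpose counters (GLOBAL_CTRL bits 0-3)
 * so the magic events take effect (errata steps 3 and 4), then freeze them
 * again.
 */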
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
if (event) {
x86_perf_event_set_period(event);
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
} else
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
}
}
static void intel_pmu_nhm_enable_all(int added)
{
if (added)
intel_pmu_nhm_workaround();
intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
u64 status;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
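/*
 * Each fixed counter has a 4-bit enable field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base here);
 * clearing that field stops the counter.
 */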
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
}
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
/*
* Enable IRQ generation (0x8),
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
bits = 0x8ULL;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
bits |= 0x1;
/*
* ANY bit is supported in v3 and up
*/
if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
bits |= 0x4;
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
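/*
 * Example: for fixed counter 1 (idx == 1) with OS and USR counting
 * requested, bits == 0xb0 and mask == 0xf0, i.e. field [7:4] of
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */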
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
intel_pmu_enable_bts(hwc->config);
return;
}
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc);
return;
}
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* Save and restart an expired event. Called by NMI contexts,
* so it has to be careful about preempting normal event ops:
*/
static int intel_pmu_save_and_restart(struct perf_event *event)
{
x86_perf_event_update(event);
return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
unsigned long flags;
int idx;
if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
if (ds)
ds->bts_index = ds->bts_buffer_base;
local_irq_restore(flags);
}
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
int handled;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This handler doesn't seem to have any issues with the unmasking
* so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
return handled;
}
loops = 0;
again:
intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
intel_pmu_reset();
goto done;
}
inc_irq_stat(apic_perf_irqs);
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
}
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
handled++;
if (!test_bit(bit, cpuc->active_mask))
continue;
if (!intel_pmu_save_and_restart(event))
continue;
data.period = event->hw.last_period;
if (perf_event_overflow(event, 1, &data, regs))
x86_pmu_stop(event, 0);
}
/*
* Repeat if there is more work to be done:
*/
status = intel_pmu_get_status();
if (status)
goto again;
done:
intel_pmu_enable_all(0);
return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
if (event->attr.freq)
return NULL;
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
return &bts_constraint;
return NULL;
}
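/*
 * Per-core extra register allocation: NULL means no per-core constraint
 * applies (the event does not use a shared MSR, or the MSR was claimed or
 * re-used with an identical config); &emptyconstraint means a sibling
 * thread already holds the MSR with a conflicting config.
 */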
static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
struct event_constraint *c;
struct intel_percore *pc;
struct er_account *era;
int i;
int free_slot;
int found;
if (!x86_pmu.percore_constraints || hwc->extra_alloc)
return NULL;
for (c = x86_pmu.percore_constraints; c->cmask; c++) {
if (e != c->code)
continue;
/*
* Allocate resource per core.
*/
pc = cpuc->per_core;
if (!pc)
break;
c = &emptyconstraint;
raw_spin_lock(&pc->lock);
free_slot = -1;
found = 0;
for (i = 0; i < MAX_EXTRA_REGS; i++) {
era = &pc->regs[i];
if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
/* Allow sharing same config */
if (hwc->extra_config == era->extra_config) {
era->ref++;
cpuc->percore_used = 1;
hwc->extra_alloc = 1;
c = NULL;
}
/* else conflict */
found = 1;
break;
} else if (era->ref == 0 && free_slot == -1)
free_slot = i;
}
if (!found && free_slot != -1) {
era = &pc->regs[free_slot];
era->ref = 1;
era->extra_reg = hwc->extra_reg;
era->extra_config = hwc->extra_config;
cpuc->percore_used = 1;
hwc->extra_alloc = 1;
c = NULL;
}
raw_spin_unlock(&pc->lock);
return c;
}
return NULL;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
c = intel_bts_constraints(event);
if (c)
return c;
c = intel_pebs_constraints(event);
if (c)
return c;
c = intel_percore_constraints(cpuc, event);
if (c)
return c;
return x86_get_event_constraints(cpuc, event);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct extra_reg *er;
struct intel_percore *pc;
struct er_account *era;
struct hw_perf_event *hwc = &event->hw;
int i, allref;
if (!cpuc->percore_used)
return;
for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (hwc->config & er->config_mask))
continue;
pc = cpuc->per_core;
raw_spin_lock(&pc->lock);
for (i = 0; i < MAX_EXTRA_REGS; i++) {
era = &pc->regs[i];
if (era->ref > 0 &&
era->extra_config == hwc->extra_config &&
era->extra_reg == er->msr) {
era->ref--;
hwc->extra_alloc = 0;
break;
}
}
allref = 0;
for (i = 0; i < MAX_EXTRA_REGS; i++)
allref += pc->regs[i].ref;
if (allref == 0)
cpuc->percore_used = 0;
raw_spin_unlock(&pc->lock);
break;
}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
if (ret)
return ret;
if (event->attr.precise_ip &&
(event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
/*
* Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
* (0x003c) so that we can use it with PEBS.
*
* The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
* PEBS capable. However we can use INST_RETIRED.ANY_P
* (0x00c0), which is a PEBS capable event, to get the same
* count.
*
* INST_RETIRED.ANY_P counts the number of cycles that retire
* CNTMASK instructions. By setting CNTMASK to a value (16)
* larger than the maximum number of instructions that can be
* retired per cycle (4) and then inverting the condition, we
* count all cycles that retire 16 or fewer instructions, which
* is every cycle.
*
* Thereby we gain a PEBS capable cycle counter.
*/
u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
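/* 0x108000c0: event 0xc0, umask 0x00, inv set (bit 23), cmask 16 (bits 31:24) */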
alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
event->hw.config = alt_config;
}
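/* The remaining checks gate use of the ANY-thread bit on raw events. */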
if (event->attr.type != PERF_TYPE_RAW)
return 0;
if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
return 0;
if (x86_pmu.version < 3)
return -EINVAL;
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
return 0;
}
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
};
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
if (!cpu_has_ht_siblings())
return NOTIFY_OK;
cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->per_core)
return NOTIFY_BAD;
raw_spin_lock_init(&cpuc->per_core->lock);
cpuc->per_core->core_id = -1;
return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int core_id = topology_core_id(cpu);
int i;
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
*/
intel_pmu_lbr_reset();
if (!cpu_has_ht_siblings())
return;
for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
if (pc && pc->core_id == core_id) {
kfree(cpuc->per_core);
cpuc->per_core = pc;
break;
}
}
cpuc->per_core->core_id = core_id;
cpuc->per_core->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_percore *pc = cpuc->per_core;
if (pc) {
if (pc->core_id == -1 || --pc->refcnt == 0)
kfree(pc);
cpuc->per_core = NULL;
}
fini_debug_store_on_cpu(cpu);
}
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
.hw_config = intel_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
/*
* PEBS is unreliable due to:
*
* AJ67 - PEBS may experience CPL leaks
* AJ68 - PEBS PMI may be delayed by one event
* AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12] is set
* AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
*
* AJ67 could be worked around by restricting the OS/USR flags.
* AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
*
* AJ106 could possibly be worked around by not allowing LBR
* usage from PEBS, including the fixup.
* AJ68 could possibly be worked around by always programming
* a pebs_event_reset[0] value and coping with the lost events.
*
* But taken together it might just make sense to not enable PEBS on
* these chips.
*/
printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
x86_pmu.pebs = 0;
x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
unsigned int unused;
unsigned int ebx;
int version;
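/*
 * Without architectural perfmon, fall back to the family-specific
 * drivers: family 6 uses the P6 PMU, family 0xf the P4/NetBurst PMU.
 */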
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
case 0x6:
return p6_pmu_init();
case 0xf:
return p4_pmu_init();
}
return -ENODEV;
}
/*
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
return -ENODEV;
version = eax.split.version_id;
if (version < 2)
x86_pmu = core_pmu;
else
x86_pmu = intel_pmu;
x86_pmu.version = version;
x86_pmu.num_counters = eax.split.num_counters;
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;
}
intel_ds_init();
/*
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
case 14: /* 65 nm core solo/duo, "Yonah" */
pr_cont("Core events, ");
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
x86_pmu.quirks = intel_clovertown_quirks;
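/* fall through: model 15 uses the Core2 tables below */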
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_core();
x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
break;
case 26: /* 45 nm nehalem, "Bloomfield" */
case 30: /* 45 nm nehalem, "Lynnfield" */
case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
if (ebx & 0x40) {
/*
* Erratum AAJ80 detected, we work it around by using
* the BR_MISP_EXEC.ANY event. This will over-count
* branch-misses, but it's still much better than the
* architectural event which is often completely bogus:
*/
intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
pr_cont("erratum AAJ80 worked around, ");
}
pr_cont("Nehalem events, ");
break;
case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_atom();
x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
pr_cont("Atom events, ");
break;
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
case 47: /* 32 nm Xeon E7 */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu.percore_constraints = intel_westmere_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
pr_cont("Westmere events, ");
break;
case 42: /* SandyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_events;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
pr_cont("SandyBridge events, ");
break;
default:
/*
* default constraints for v2 and up
*/
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("generic architected perfmon, ");
}
return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */
static int intel_pmu_init(void)
{
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
| #ifdef CONFIG_CPU_SUP_INTEL
#define MAX_EXTRA_REGS 2
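/* the only shared extra registers defined here are MSR_OFFCORE_RSP_0/1 */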
/*
* Per register state.
*/
struct er_account {
int ref; /* reference count */
unsigned int extra_reg; /* extra MSR number */
u64 extra_config; /* extra MSR config */
};
/*
* Per-core state.
* This is used to coordinate shared registers for HT threads.
*/
struct intel_percore {
raw_spinlock_t lock; /* protect structure */
struct er_account regs[MAX_EXTRA_REGS];
int refcnt; /* number of threads */
unsigned core_id;
};
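/*
 * intel_pmu_cpu_prepare() allocates one intel_percore per CPU;
 * intel_pmu_cpu_starting() then points all HT siblings at a single
 * shared instance keyed by core_id, refcounted via refcnt.
 */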
/*
* Intel PerfMon, used on Core and later.
*/
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/*
* Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
* 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
* ratio between these counters.
*/
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
EVENT_EXTRA_END
};
static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0xb7, 0),
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_percore_constraints[] __read_mostly =
{
INTEL_EVENT_CONSTRAINT(0xb7, 0),
INTEL_EVENT_CONSTRAINT(0xbb, 0),
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
/*
* Use RFO, not WRITEBACK, because a write miss would typically occur
* on RFO.
*/
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
/*
* Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
* See IA32 SDM Vol 3B 30.6.1.3
*/
#define NHM_DMND_DATA_RD (1 << 0)
#define NHM_DMND_RFO (1 << 1)
#define NHM_DMND_IFETCH (1 << 2)
#define NHM_DMND_WB (1 << 3)
#define NHM_PF_DATA_RD (1 << 4)
#define NHM_PF_DATA_RFO (1 << 5)
#define NHM_PF_IFETCH (1 << 6)
#define NHM_OFFCORE_OTHER (1 << 7)
#define NHM_UNCORE_HIT (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
#define NHM_OTHER_CORE_HITM (1 << 10)
/* reserved */
#define NHM_REMOTE_CACHE_FWD (1 << 12)
#define NHM_REMOTE_DRAM (1 << 13)
#define NHM_LOCAL_DRAM (1 << 14)
#define NHM_NON_DRAM (1 << 15)
#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
#define NHM_DMND_READ (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
},
}
};
static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
/*
* Use RFO, not WRITEBACK, because a write miss would typically occur
* on RFO.
*/
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0x0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
intel_pmu_pebs_disable_all();
intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
cpuc->events[X86_PMC_IDX_FIXED_BTS];
if (WARN_ON_ONCE(!event))
return;
intel_pmu_enable_bts(event->hw.config);
}
}
/*
* Workaround for:
* Intel Errata AAK100 (model 26)
* Intel Errata AAP53 (model 30)
* Intel Errata BD53 (model 44)
*
* The official story:
* These chips need to be 'reset' when adding counters by programming the
* magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
* in sequence on the same PMC or on different PMCs.
*
* In practice it appears some of these events do in fact count, and
* we need to program all 4 events.
*/
static void intel_pmu_nhm_workaround(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
static const unsigned long nhm_magic[4] = {
0x4300B5,
0x4300D2,
0x4300B1,
0x4300B1
};
struct perf_event *event;
int i;
/*
* The erratum requires the following steps:
* 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
* 2) Configure 4 PERFEVTSELx with the magic events and clear
* the corresponding PMCx;
* 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
* 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
* 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
*/
/*
* The real steps we take are a little different from the above:
* A) To reduce MSR operations, we skip step 1) since those MSRs
* are already cleared before this function is called;
* B) Call x86_perf_event_update to save PMCx before configuring
* PERFEVTSELx with the magic numbers;
* C) For step 5), we clear a PERFEVTSELx only when it is not
* currently in use;
* D) Call x86_perf_event_set_period to restore PMCx.
*/
/* We always operate 4 pairs of PERF Counters */
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
if (event)
x86_perf_event_update(event);
}
for (i = 0; i < 4; i++) {
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
}
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
for (i = 0; i < 4; i++) {
event = cpuc->events[i];
if (event) {
x86_perf_event_set_period(event);
__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
} else
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
}
}
static void intel_pmu_nhm_enable_all(int added)
{
if (added)
intel_pmu_nhm_workaround();
intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
u64 status;
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
}
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
/*
* Enable IRQ generation (0x8),
* and enable ring-3 counting (0x2) and ring-0 counting (0x1)
* if requested:
*/
bits = 0x8ULL;
if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
bits |= 0x2;
if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
bits |= 0x1;
/*
* ANY bit is supported in v3 and up
*/
if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
bits |= 0x4;
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
intel_pmu_enable_bts(hwc->config);
return;
}
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc);
return;
}
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
* Save and restart an expired event. Called by NMI contexts,
* so it has to be careful about preempting normal event ops:
*/
static int intel_pmu_save_and_restart(struct perf_event *event)
{
x86_perf_event_update(event);
return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
unsigned long flags;
int idx;
if (!x86_pmu.num_counters)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
if (ds)
ds->bts_index = ds->bts_buffer_base;
local_irq_restore(flags);
}
/*
* This handler is triggered by the local APIC, so the APIC IRQ handling
* rules apply:
*/
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
int handled;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
* inside the nmi handler. As a result, the unmasking was pushed
* into all the nmi handlers.
*
* This handler doesn't seem to have any issues with the unmasking
* so it was left at the top.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
return handled;
}
loops = 0;
again:
intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
intel_pmu_reset();
goto done;
}
inc_irq_stat(apic_perf_irqs);
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
}
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
handled++;
if (!test_bit(bit, cpuc->active_mask))
continue;
if (!intel_pmu_save_and_restart(event))
continue;
data.period = event->hw.last_period;
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
/*
* Repeat if there is more work to be done:
*/
status = intel_pmu_get_status();
if (status)
goto again;
done:
intel_pmu_enable_all(0);
return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
if (event->attr.freq)
return NULL;
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
return &bts_constraint;
return NULL;
}
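/*
 * Some Nehalem/Westmere events (e.g. OFFCORE_RESPONSE) need an extra
 * config MSR that is shared between the HT siblings of a core. Track
 * those allocations per core with reference counts; if another thread
 * already programmed the MSR with a different value, return the empty
 * constraint so the event cannot be scheduled.
 */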
static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
struct event_constraint *c;
struct intel_percore *pc;
struct er_account *era;
int i;
int free_slot;
int found;
if (!x86_pmu.percore_constraints || hwc->extra_alloc)
return NULL;
for (c = x86_pmu.percore_constraints; c->cmask; c++) {
if (e != c->code)
continue;
/*
* Allocate resource per core.
*/
pc = cpuc->per_core;
if (!pc)
break;
c = &emptyconstraint;
raw_spin_lock(&pc->lock);
free_slot = -1;
found = 0;
for (i = 0; i < MAX_EXTRA_REGS; i++) {
era = &pc->regs[i];
if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
/* Allow sharing same config */
if (hwc->extra_config == era->extra_config) {
era->ref++;
cpuc->percore_used = 1;
hwc->extra_alloc = 1;
c = NULL;
}
/* else conflict */
found = 1;
break;
} else if (era->ref == 0 && free_slot == -1)
free_slot = i;
}
if (!found && free_slot != -1) {
era = &pc->regs[free_slot];
era->ref = 1;
era->extra_reg = hwc->extra_reg;
era->extra_config = hwc->extra_config;
cpuc->percore_used = 1;
hwc->extra_alloc = 1;
c = NULL;
}
raw_spin_unlock(&pc->lock);
return c;
}
return NULL;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
struct event_constraint *c;
c = intel_bts_constraints(event);
if (c)
return c;
c = intel_pebs_constraints(event);
if (c)
return c;
c = intel_percore_constraints(cpuc, event);
if (c)
return c;
return x86_get_event_constraints(cpuc, event);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct extra_reg *er;
struct intel_percore *pc;
struct er_account *era;
struct hw_perf_event *hwc = &event->hw;
int i, allref;
if (!cpuc->percore_used)
return;
for (er = x86_pmu.extra_regs; er->msr; er++) {
if (er->event != (hwc->config & er->config_mask))
continue;
pc = cpuc->per_core;
raw_spin_lock(&pc->lock);
for (i = 0; i < MAX_EXTRA_REGS; i++) {
era = &pc->regs[i];
if (era->ref > 0 &&
era->extra_config == hwc->extra_config &&
era->extra_reg == er->msr) {
era->ref--;
hwc->extra_alloc = 0;
break;
}
}
allref = 0;
for (i = 0; i < MAX_EXTRA_REGS; i++)
allref += pc->regs[i].ref;
if (allref == 0)
cpuc->percore_used = 0;
raw_spin_unlock(&pc->lock);
break;
}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
if (ret)
return ret;
if (event->attr.precise_ip &&
(event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
/*
* Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
* (0x003c) so that we can use it with PEBS.
*
* The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
* PEBS capable. However we can use INST_RETIRED.ANY_P
* (0x00c0), which is a PEBS capable event, to get the same
* count.
*
* INST_RETIRED.ANY_P counts the number of cycles that retire at
* least CNTMASK instructions. By setting CNTMASK to a value (16)
* larger than the maximum number of instructions that can be
* retired per cycle (4) and then inverting the condition, we
* count all cycles that retire fewer than 16 instructions, which
* is every cycle.
*
* Thereby we gain a PEBS capable cycle counter.
*/
u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
event->hw.config = alt_config;
}
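/*
 * Only raw events may request the ANY bit (count on both HT threads).
 * It exists from architectural perfmon v3 on and, with paranoid
 * settings, is restricted to CAP_SYS_ADMIN.
 */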
if (event->attr.type != PERF_TYPE_RAW)
return 0;
if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
return 0;
if (x86_pmu.version < 3)
return -EINVAL;
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
return 0;
}
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.event_constraints = intel_core_event_constraints,
};
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
if (!cpu_has_ht_siblings())
return NOTIFY_OK;
cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->per_core)
return NOTIFY_BAD;
raw_spin_lock_init(&cpuc->per_core->lock);
cpuc->per_core->core_id = -1;
return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int core_id = topology_core_id(cpu);
int i;
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
*/
intel_pmu_lbr_reset();
if (!cpu_has_ht_siblings())
return;
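/*
 * If an HT sibling already set up a per-core structure for this
 * core_id, drop ours and share that one instead.
 */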
for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
if (pc && pc->core_id == core_id) {
kfree(cpuc->per_core);
cpuc->per_core = pc;
break;
}
}
cpuc->per_core->core_id = core_id;
cpuc->per_core->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_percore *pc = cpuc->per_core;
if (pc) {
if (pc->core_id == -1 || --pc->refcnt == 0)
kfree(pc);
cpuc->per_core = NULL;
}
fini_debug_store_on_cpu(cpu);
}
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
.hw_config = intel_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.get_event_constraints = intel_get_event_constraints,
.put_event_constraints = intel_put_event_constraints,
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
/*
* PEBS is unreliable due to:
*
* AJ67 - PEBS may experience CPL leaks
* AJ68 - PEBS PMI may be delayed by one event
* AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
* AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
*
* AJ67 could be worked around by restricting the OS/USR flags.
* AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
*
* AJ106 could possibly be worked around by not allowing LBR
* usage from PEBS, including the fixup.
* AJ68 could possibly be worked around by always programming
* a pebs_event_reset[0] value and coping with the lost events.
*
* But taken together it might just make sense to not enable PEBS on
* these chips.
*/
printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
x86_pmu.pebs = 0;
x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;
unsigned int unused;
unsigned int ebx;
int version;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
case 0x6:
return p6_pmu_init();
case 0xf:
return p4_pmu_init();
}
return -ENODEV;
}
/*
* Check whether the Architectural PerfMon supports
* Branch Misses Retired hw_event or not.
*/
cpuid(10, &eax.full, &ebx, &unused, &edx.full);
if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
return -ENODEV;
version = eax.split.version_id;
if (version < 2)
x86_pmu = core_pmu;
else
x86_pmu = intel_pmu;
x86_pmu.version = version;
x86_pmu.num_counters = eax.split.num_counters;
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;
}
intel_ds_init();
/*
* Install the hw-cache-events table:
*/
switch (boot_cpu_data.x86_model) {
case 14: /* 65 nm core solo/duo, "Yonah" */
pr_cont("Core events, ");
break;
case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
x86_pmu.quirks = intel_clovertown_quirks;
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_core();
x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
break;
case 26: /* 45 nm nehalem, "Bloomfield" */
case 30: /* 45 nm nehalem, "Lynnfield" */
case 46: /* 45 nm nehalem-ex, "Beckton" */
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_nehalem_event_constraints;
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
if (ebx & 0x40) {
/*
* Erratum AAJ80 detected, we work it around by using
* the BR_MISP_EXEC.ANY event. This will over-count
* branch-misses, but it's still much better than the
* architectural event which is often completely bogus:
*/
intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
pr_cont("erratum AAJ80 worked around, ");
}
pr_cont("Nehalem events, ");
break;
case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_atom();
x86_pmu.event_constraints = intel_gen_event_constraints;
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
pr_cont("Atom events, ");
break;
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
case 47: /* 32 nm Xeon E7 */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu.percore_constraints = intel_westmere_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
x86_pmu.extra_regs = intel_westmere_extra_regs;
/* UOPS_ISSUED.STALLED_CYCLES */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;
pr_cont("Westmere events, ");
break;
case 42: /* SandyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
intel_pmu_lbr_init_nhm();
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_events;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;
pr_cont("SandyBridge events, ");
break;
default:
/*
* default constraints for v2 and up
*/
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("generic architected perfmon, ");
}
return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */
static int intel_pmu_init(void)
{
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
|
83,781,774,035,961 | #ifdef CONFIG_CPU_SUP_INTEL
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 4
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE PAGE_SIZE
/*
* pebs_record_32 for p4 and core not supported
struct pebs_record_32 {
u32 flags, ip;
u32 ax, bx, cx, dx;
u32 si, di, bp, sp;
};
*/
struct pebs_record_core {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
u64 status, dla, dse, lat;
};
/*
* A debug store configuration.
*
* We only support architectures that use 64bit fields.
*/
struct debug_store {
u64 bts_buffer_base;
u64 bts_index;
u64 bts_absolute_maximum;
u64 bts_interrupt_threshold;
u64 pebs_buffer_base;
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
static void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
(u32)((u64)(unsigned long)ds),
(u32)((u64)(unsigned long)ds >> 32));
}
static void fini_debug_store_on_cpu(int cpu)
{
if (!per_cpu(cpu_hw_events, cpu).ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static int alloc_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
int node = cpu_to_node(cpu);
int max, thresh = 1; /* always use a single PEBS record */
void *buffer;
if (!x86_pmu.pebs)
return 0;
buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
ds->pebs_absolute_maximum = ds->pebs_buffer_base +
max * x86_pmu.pebs_record_size;
ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
thresh * x86_pmu.pebs_record_size;
return 0;
}
static void release_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds || !x86_pmu.pebs)
return;
kfree((void *)(unsigned long)ds->pebs_buffer_base);
ds->pebs_buffer_base = 0;
}
static int alloc_bts_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
int node = cpu_to_node(cpu);
int max, thresh;
void *buffer;
if (!x86_pmu.bts)
return 0;
buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;
ds->bts_buffer_base = (u64)(unsigned long)buffer;
ds->bts_index = ds->bts_buffer_base;
ds->bts_absolute_maximum = ds->bts_buffer_base +
max * BTS_RECORD_SIZE;
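/* Raise the interrupt once only max/16 records of room remain (15/16 full). */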
ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
thresh * BTS_RECORD_SIZE;
return 0;
}
static void release_bts_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds || !x86_pmu.bts)
return;
kfree((void *)(unsigned long)ds->bts_buffer_base);
ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
int node = cpu_to_node(cpu);
struct debug_store *ds;
ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!ds))
return -ENOMEM;
per_cpu(cpu_hw_events, cpu).ds = ds;
return 0;
}
static void release_ds_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
per_cpu(cpu_hw_events, cpu).ds = NULL;
kfree(ds);
}
static void release_ds_buffers(void)
{
int cpu;
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
get_online_cpus();
for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
release_pebs_buffer(cpu);
release_bts_buffer(cpu);
release_ds_buffer(cpu);
}
put_online_cpus();
}
static void reserve_ds_buffers(void)
{
int bts_err = 0, pebs_err = 0;
int cpu;
x86_pmu.bts_active = 0;
x86_pmu.pebs_active = 0;
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
if (!x86_pmu.bts)
bts_err = 1;
if (!x86_pmu.pebs)
pebs_err = 1;
get_online_cpus();
for_each_possible_cpu(cpu) {
if (alloc_ds_buffer(cpu)) {
bts_err = 1;
pebs_err = 1;
}
if (!bts_err && alloc_bts_buffer(cpu))
bts_err = 1;
if (!pebs_err && alloc_pebs_buffer(cpu))
pebs_err = 1;
if (bts_err && pebs_err)
break;
}
if (bts_err) {
for_each_possible_cpu(cpu)
release_bts_buffer(cpu);
}
if (pebs_err) {
for_each_possible_cpu(cpu)
release_pebs_buffer(cpu);
}
if (bts_err && pebs_err) {
for_each_possible_cpu(cpu)
release_ds_buffer(cpu);
} else {
if (x86_pmu.bts && !bts_err)
x86_pmu.bts_active = 1;
if (x86_pmu.pebs && !pebs_err)
x86_pmu.pebs_active = 1;
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}
put_online_cpus();
}
/*
* BTS
*/
static struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
static void intel_pmu_enable_bts(u64 config)
{
unsigned long debugctlmsr;
debugctlmsr = get_debugctlmsr();
debugctlmsr |= DEBUGCTLMSR_TR;
debugctlmsr |= DEBUGCTLMSR_BTS;
debugctlmsr |= DEBUGCTLMSR_BTINT;
if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
if (!(config & ARCH_PERFMON_EVENTSEL_USR))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;
if (!cpuc->ds)
return;
debugctlmsr = get_debugctlmsr();
debugctlmsr &=
~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
update_debugctlmsr(debugctlmsr);
}
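/*
 * Flush the BTS buffer: convert each from/to branch record into a
 * perf sample attributed to the BTS event and reset the buffer index.
 */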
static int intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
u64 to;
u64 flags;
};
struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;
if (!event)
return 0;
if (!x86_pmu.bts_active)
return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
top = (struct bts_record *)(unsigned long)ds->bts_index;
if (top <= at)
return 0;
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
* We will overwrite the from and to address before we output
* the sample.
*/
perf_prepare_sample(&header, &data, event, &regs);
if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
return 1;
for (; at < top; at++) {
data.ip = at->from;
data.addr = at->to;
perf_output_sample(&handle, &header, &data, event);
}
perf_output_end(&handle);
/* There's new data available. */
event->hw.interrupts++;
event->pending_kill = POLL_IN;
return 1;
}
/*
* PEBS
*/
static struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_pebs_events[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
EVENT_CONSTRAINT_END
};
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
if (!event->attr.precise_ip)
return NULL;
if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &emptyconstraint;
}
static void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
WARN_ON_ONCE(cpuc->enabled);
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
}
static void intel_pmu_pebs_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_disable(event);
}
static void intel_pmu_pebs_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
static void intel_pmu_pebs_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
return ip > PAGE_OFFSET;
#else
return (long)ip < 0;
#endif
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
/*
* We don't need to fixup if the PEBS assist is fault like
*/
if (!x86_pmu.intel_cap.pebs_trap)
return 1;
/*
* No LBR entry, no basic block, no rewinding
*/
if (!cpuc->lbr_stack.nr || !from || !to)
return 0;
/*
* Basic blocks should never cross user/kernel boundaries
*/
if (kernel_ip(ip) != kernel_ip(to))
return 0;
/*
* unsigned math, either ip is before the start (impossible) or
* the basic block is larger than 1 page (sanity)
*/
if ((ip - to) > PAGE_SIZE)
return 0;
/*
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
regs->ip = from;
return 1;
}
do {
struct insn insn;
u8 buf[MAX_INSN_SIZE];
void *kaddr;
old_to = to;
if (!kernel_ip(ip)) {
int bytes, size = MAX_INSN_SIZE;
bytes = copy_from_user_nmi(buf, (void __user *)to, size);
if (bytes != size)
return 0;
kaddr = buf;
} else
kaddr = (void *)to;
kernel_insn_init(&insn, kaddr);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
if (to == ip) {
regs->ip = old_to;
return 1;
}
/*
* Even though we decoded the basic block, the instruction stream
* never matched the given IP, either the TO or the IP got corrupted.
*/
return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
/*
* We cast to pebs_record_core since that is a subset of
* both formats and we don't use the other fields in this
* routine.
*/
struct pebs_record_core *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
if (!intel_pmu_save_and_restart(event))
return;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
/*
* We use the interrupt regs as a base because the PEBS record
* does not contain a full regs set, specifically it seems to
* lack segment descriptors, which get used by things like
* user_mode().
*
* In the simple case fix up only the IP and BP,SP regs, for
* PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
regs.ip = pebs->ip;
regs.bp = pebs->bp;
regs.sp = pebs->sp;
if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
else
regs.flags &= ~PERF_EFLAGS_EXACT;
if (perf_event_overflow(event, 1, &data, &regs))
x86_pmu_stop(event, 0);
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = cpuc->events[0]; /* PMC0 only */
struct pebs_record_core *at, *top;
int n;
if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
/*
* Whatever else happens, drain the thing
*/
ds->pebs_index = ds->pebs_buffer_base;
if (!test_bit(0, cpuc->active_mask))
return;
WARN_ON_ONCE(!event);
if (!event->attr.precise_ip)
return;
n = top - at;
if (n <= 0)
return;
/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > 1);
at += n - 1;
__intel_pmu_pebs_event(event, iregs, at);
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct pebs_record_nhm *at, *top;
struct perf_event *event = NULL;
u64 status = 0;
int bit, n;
if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
ds->pebs_index = ds->pebs_buffer_base;
n = top - at;
if (n <= 0)
return;
/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
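/*
 * Each PEBS record carries a status bitmask of the counters that
 * overflowed; attribute the record to the first active precise event
 * whose bit has not been consumed yet.
 */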
for ( ; at < top; at++) {
for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
WARN_ON_ONCE(!event);
if (!event->attr.precise_ip)
continue;
if (__test_and_set_bit(bit, (unsigned long *)&status))
continue;
break;
}
if (!event || bit >= MAX_PEBS_EVENTS)
continue;
__intel_pmu_pebs_event(event, iregs, at);
}
}
/*
* BTS, PEBS probe and setup
*/
static void intel_ds_init(void)
{
/*
* No support for 32bit formats
*/
if (!boot_cpu_has(X86_FEATURE_DTES64))
return;
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
switch (format) {
case 0:
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
break;
case 1:
printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
break;
default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
}
}
}
#else /* CONFIG_CPU_SUP_INTEL */
static void reserve_ds_buffers(void)
{
}
static void release_ds_buffers(void)
{
}
#endif /* CONFIG_CPU_SUP_INTEL */
| #ifdef CONFIG_CPU_SUP_INTEL
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 4
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE PAGE_SIZE
/*
* pebs_record_32 for p4 and core not supported
struct pebs_record_32 {
u32 flags, ip;
u32 ax, bx, cx, dx;
u32 si, di, bp, sp;
};
*/
struct pebs_record_core {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
};
struct pebs_record_nhm {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
u64 status, dla, dse, lat;
};
/*
* A debug store configuration.
*
* We only support architectures that use 64bit fields.
*/
struct debug_store {
u64 bts_buffer_base;
u64 bts_index;
u64 bts_absolute_maximum;
u64 bts_interrupt_threshold;
u64 pebs_buffer_base;
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
static void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
(u32)((u64)(unsigned long)ds),
(u32)((u64)(unsigned long)ds >> 32));
}
static void fini_debug_store_on_cpu(int cpu)
{
if (!per_cpu(cpu_hw_events, cpu).ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}
static int alloc_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
int node = cpu_to_node(cpu);
int max, thresh = 1; /* always use a single PEBS record */
void *buffer;
if (!x86_pmu.pebs)
return 0;
buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
ds->pebs_absolute_maximum = ds->pebs_buffer_base +
max * x86_pmu.pebs_record_size;
ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
thresh * x86_pmu.pebs_record_size;
return 0;
}
static void release_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds || !x86_pmu.pebs)
return;
kfree((void *)(unsigned long)ds->pebs_buffer_base);
ds->pebs_buffer_base = 0;
}
static int alloc_bts_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
int node = cpu_to_node(cpu);
int max, thresh;
void *buffer;
if (!x86_pmu.bts)
return 0;
buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;
ds->bts_buffer_base = (u64)(unsigned long)buffer;
ds->bts_index = ds->bts_buffer_base;
ds->bts_absolute_maximum = ds->bts_buffer_base +
max * BTS_RECORD_SIZE;
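/* Raise the interrupt once only max/16 records of room remain (15/16 full). */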
ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
thresh * BTS_RECORD_SIZE;
return 0;
}
static void release_bts_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds || !x86_pmu.bts)
return;
kfree((void *)(unsigned long)ds->bts_buffer_base);
ds->bts_buffer_base = 0;
}
static int alloc_ds_buffer(int cpu)
{
int node = cpu_to_node(cpu);
struct debug_store *ds;
ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!ds))
return -ENOMEM;
per_cpu(cpu_hw_events, cpu).ds = ds;
return 0;
}
static void release_ds_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
per_cpu(cpu_hw_events, cpu).ds = NULL;
kfree(ds);
}
static void release_ds_buffers(void)
{
int cpu;
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
get_online_cpus();
for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
release_pebs_buffer(cpu);
release_bts_buffer(cpu);
release_ds_buffer(cpu);
}
put_online_cpus();
}
static void reserve_ds_buffers(void)
{
int bts_err = 0, pebs_err = 0;
int cpu;
x86_pmu.bts_active = 0;
x86_pmu.pebs_active = 0;
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
if (!x86_pmu.bts)
bts_err = 1;
if (!x86_pmu.pebs)
pebs_err = 1;
get_online_cpus();
for_each_possible_cpu(cpu) {
if (alloc_ds_buffer(cpu)) {
bts_err = 1;
pebs_err = 1;
}
if (!bts_err && alloc_bts_buffer(cpu))
bts_err = 1;
if (!pebs_err && alloc_pebs_buffer(cpu))
pebs_err = 1;
if (bts_err && pebs_err)
break;
}
if (bts_err) {
for_each_possible_cpu(cpu)
release_bts_buffer(cpu);
}
if (pebs_err) {
for_each_possible_cpu(cpu)
release_pebs_buffer(cpu);
}
if (bts_err && pebs_err) {
for_each_possible_cpu(cpu)
release_ds_buffer(cpu);
} else {
if (x86_pmu.bts && !bts_err)
x86_pmu.bts_active = 1;
if (x86_pmu.pebs && !pebs_err)
x86_pmu.pebs_active = 1;
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}
put_online_cpus();
}
/*
* BTS
*/
static struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
static void intel_pmu_enable_bts(u64 config)
{
unsigned long debugctlmsr;
debugctlmsr = get_debugctlmsr();
debugctlmsr |= DEBUGCTLMSR_TR;
debugctlmsr |= DEBUGCTLMSR_BTS;
debugctlmsr |= DEBUGCTLMSR_BTINT;
if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
if (!(config & ARCH_PERFMON_EVENTSEL_USR))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
update_debugctlmsr(debugctlmsr);
}
static void intel_pmu_disable_bts(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;
if (!cpuc->ds)
return;
debugctlmsr = get_debugctlmsr();
debugctlmsr &=
~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
update_debugctlmsr(debugctlmsr);
}
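/*
 * Flush the BTS buffer: convert each from/to branch record into a
 * perf sample attributed to the BTS event and reset the buffer index.
 */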
static int intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
u64 to;
u64 flags;
};
struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;
if (!event)
return 0;
if (!x86_pmu.bts_active)
return 0;
at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
top = (struct bts_record *)(unsigned long)ds->bts_index;
if (top <= at)
return 0;
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
* We will overwrite the from and to address before we output
* the sample.
*/
perf_prepare_sample(&header, &data, event, &regs);
if (perf_output_begin(&handle, event, header.size * (top - at), 1))
return 1;
for (; at < top; at++) {
data.ip = at->from;
data.addr = at->to;
perf_output_sample(&handle, &header, &data, event);
}
perf_output_end(&handle);
/* There's new data available. */
event->hw.interrupts++;
event->pending_kill = POLL_IN;
return 1;
}
/*
* PEBS
*/
static struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
INTEL_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_EVENT_CONSTRAINT(0x0b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
INTEL_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_pebs_events[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
EVENT_CONSTRAINT_END
};
static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
if (!event->attr.precise_ip)
return NULL;
if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}
return &emptyconstraint;
}
static void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
WARN_ON_ONCE(cpuc->enabled);
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
}
static void intel_pmu_pebs_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_disable(event);
}
static void intel_pmu_pebs_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}
static void intel_pmu_pebs_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>
static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
return ip > PAGE_OFFSET;
#else
return (long)ip < 0;
#endif
}
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
/*
* We don't need to fixup if the PEBS assist is fault like
*/
if (!x86_pmu.intel_cap.pebs_trap)
return 1;
/*
* No LBR entry, no basic block, no rewinding
*/
if (!cpuc->lbr_stack.nr || !from || !to)
return 0;
/*
* Basic blocks should never cross user/kernel boundaries
*/
if (kernel_ip(ip) != kernel_ip(to))
return 0;
/*
* unsigned math, either ip is before the start (impossible) or
* the basic block is larger than 1 page (sanity)
*/
if ((ip - to) > PAGE_SIZE)
return 0;
/*
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
regs->ip = from;
return 1;
}
do {
struct insn insn;
u8 buf[MAX_INSN_SIZE];
void *kaddr;
old_to = to;
if (!kernel_ip(ip)) {
int bytes, size = MAX_INSN_SIZE;
bytes = copy_from_user_nmi(buf, (void __user *)to, size);
if (bytes != size)
return 0;
kaddr = buf;
} else
kaddr = (void *)to;
kernel_insn_init(&insn, kaddr);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
if (to == ip) {
regs->ip = old_to;
return 1;
}
/*
* Even though we decoded the basic block, the instruction stream
* never matched the given IP, either the TO or the IP got corrupted.
*/
return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
/*
* We cast to pebs_record_core since that is a subset of
* both formats and we don't use the other fields in this
* routine.
*/
struct pebs_record_core *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
if (!intel_pmu_save_and_restart(event))
return;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
/*
* We use the interrupt regs as a base because the PEBS record
* does not contain a full regs set, specifically it seems to
* lack segment descriptors, which get used by things like
* user_mode().
*
* In the simple case fix up only the IP and BP,SP regs, for
* PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
regs.ip = pebs->ip;
regs.bp = pebs->bp;
regs.sp = pebs->sp;
if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
else
regs.flags &= ~PERF_EFLAGS_EXACT;
if (perf_event_overflow(event, &data, &regs))
x86_pmu_stop(event, 0);
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = cpuc->events[0]; /* PMC0 only */
struct pebs_record_core *at, *top;
int n;
if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
/*
* Whatever else happens, drain the thing
*/
ds->pebs_index = ds->pebs_buffer_base;
if (!test_bit(0, cpuc->active_mask))
return;
WARN_ON_ONCE(!event);
if (!event->attr.precise_ip)
return;
n = top - at;
if (n <= 0)
return;
/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > 1);
at += n - 1;
__intel_pmu_pebs_event(event, iregs, at);
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct pebs_record_nhm *at, *top;
struct perf_event *event = NULL;
u64 status = 0;
int bit, n;
if (!x86_pmu.pebs_active)
return;
at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
ds->pebs_index = ds->pebs_buffer_base;
n = top - at;
if (n <= 0)
return;
/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
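/*
 * Each PEBS record carries a status bitmask of the counters that
 * overflowed; attribute the record to the first active precise event
 * whose bit has not been consumed yet.
 */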
for ( ; at < top; at++) {
for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
WARN_ON_ONCE(!event);
if (!event->attr.precise_ip)
continue;
if (__test_and_set_bit(bit, (unsigned long *)&status))
continue;
break;
}
if (!event || bit >= MAX_PEBS_EVENTS)
continue;
__intel_pmu_pebs_event(event, iregs, at);
}
}
/*
* BTS, PEBS probe and setup
*/
static void intel_ds_init(void)
{
/*
* No support for 32bit formats
*/
if (!boot_cpu_has(X86_FEATURE_DTES64))
return;
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
switch (format) {
case 0:
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
break;
case 1:
printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
break;
default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
}
}
}
#else /* CONFIG_CPU_SUP_INTEL */
static void reserve_ds_buffers(void)
{
}
static void release_ds_buffers(void)
{
}
#endif /* CONFIG_CPU_SUP_INTEL */
|
175,433,667,749,742 | /*
* Netburst Performance Events (P4, old Xeon)
*
* Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
* Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
*
* For licencing details see kernel-base/COPYING
*/
#ifdef CONFIG_CPU_SUP_INTEL
#include <asm/perf_event_p4.h>
#define P4_CNTR_LIMIT 3
/*
* array indices: 0,1 - HT threads, used with HT enabled cpu
*/
struct p4_event_bind {
unsigned int opcode; /* Event code and ESCR selector */
unsigned int escr_msr[2]; /* ESCR MSR for this event */
unsigned int escr_emask; /* valid ESCR EventMask bits */
unsigned int shared; /* event is shared across threads */
char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
};
struct p4_pebs_bind {
unsigned int metric_pebs;
unsigned int metric_vert;
};
/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert) \
[P4_PEBS_METRIC__##name] = { \
.metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG, \
.metric_vert = vert, \
}
/*
* note we have P4_PEBS_ENABLE_UOP_TAG always set here
*
* it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
* event configuration to find out which values are to be
* written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
* registers
*/
static struct p4_pebs_bind p4_pebs_bind_map[] = {
P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired, 0x0000002, 0x0000001),
P4_GEN_PEBS_BIND(dtlb_load_miss_retired, 0x0000004, 0x0000001),
P4_GEN_PEBS_BIND(dtlb_store_miss_retired, 0x0000004, 0x0000002),
P4_GEN_PEBS_BIND(dtlb_all_miss_retired, 0x0000004, 0x0000003),
P4_GEN_PEBS_BIND(tagged_mispred_branch, 0x0018000, 0x0000010),
P4_GEN_PEBS_BIND(mob_load_replay_retired, 0x0000200, 0x0000001),
P4_GEN_PEBS_BIND(split_load_retired, 0x0000400, 0x0000001),
P4_GEN_PEBS_BIND(split_store_retired, 0x0000400, 0x0000002),
};
/*
* Note that we don't use CCCR1 here; there is an
* exception for P4_BSQ_ALLOCATION, but we have no
* workaround for it.
*
* Consider this binding as the resources a particular
* event may borrow; it doesn't contain the EventMask,
* Tags and friends -- those are left to the caller.
*/
static struct p4_event_bind p4_event_bind_map[] = {
[P4_EVENT_TC_DELIVER_MODE] = {
.opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
.shared = 1,
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_BPU_FETCH_REQUEST] = {
.opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
.escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_ITLB_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
.escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_MEMORY_CANCEL] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MEMORY_COMPLETE] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
.escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_LOAD_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_STORE_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MOB_LOAD_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
.escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_PAGE_WALK_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
.escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
.shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_CACHE_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ALLOCATION] = {
.opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_FSB_DATA_ACTIVITY] = {
.opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
.shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
.cntr = { {0, -1, -1}, {1, -1, -1} },
},
[P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_SSE_INPUT_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_64BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_128BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_X87_FP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_TC_MISC] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MISC),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_GLOBAL_POWER_EVENTS] = {
.opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_TC_MS_XFER] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_UOP_QUEUE_WRITES] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RESOURCE_STALL] = {
.opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
.escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_WC_BUFFER] = {
.opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_B2B_CYCLES] = {
.opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BNR] = {
.opcode = P4_OPCODE(P4_EVENT_BNR),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_SNOOP] = {
.opcode = P4_OPCODE(P4_EVENT_SNOOP),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_RESPONSE] = {
.opcode = P4_OPCODE(P4_EVENT_RESPONSE),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_FRONT_END_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_EXECUTION_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_REPLAY_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOPS_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOP_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
.escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_X87_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MACHINE_CLEAR] = {
.opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_COMPLETED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
};
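/*
 * P4_GEN_CACHE_EVENT below builds a complete raw config value for a
 * cache event: the ESCR half carries the event code and the EventMask
 * bit, while the CCCR half carries the PEBS metric bits together with
 * the ESEL field recovered from the packed event opcode.
 */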
#define P4_GEN_CACHE_EVENT(event, bit, metric) \
p4_config_pack_escr(P4_ESCR_EVENT(event) | \
P4_ESCR_EMASK_BIT(event, bit)) | \
p4_config_pack_cccr(metric | \
P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
static __initconst const u64 p4_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__1stl_cache_load_miss_retired),
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__dtlb_load_miss_retired),
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__dtlb_store_miss_retired),
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
P4_PEBS_METRIC__none),
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
P4_PEBS_METRIC__none),
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
/* non-halted CPU clocks */
[PERF_COUNT_HW_CPU_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
/*
* retired instructions
* for the sake of simplicity we don't use the FSB tagging
*/
[PERF_COUNT_HW_INSTRUCTIONS] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
/* cache hits */
[PERF_COUNT_HW_CACHE_REFERENCES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
/* cache misses */
[PERF_COUNT_HW_CACHE_MISSES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
/* branch instructions retired */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
/* mispredicted branches retired */
[PERF_COUNT_HW_BRANCH_MISSES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
/* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
[PERF_COUNT_HW_BUS_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
unsigned int evnt = p4_config_unpack_event(config);
struct p4_event_bind *bind = NULL;
if (evnt < ARRAY_SIZE(p4_event_bind_map))
bind = &p4_event_bind_map[evnt];
return bind;
}
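/*
 * p4_pmu_event_map completes a generic event template: the table above
 * stores only the ESCR half of the config, so the matching CCCR ESEL
 * value is looked up through the event bind and merged in here.
 */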
static u64 p4_pmu_event_map(int hw_event)
{
struct p4_event_bind *bind;
unsigned int esel;
u64 config;
config = p4_general_events[hw_event];
bind = p4_config_get_bind(config);
esel = P4_OPCODE_ESEL(bind->opcode);
config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
return config;
}
/* check cpu model specifics */
static bool p4_event_match_cpu_model(unsigned int event_idx)
{
/* the INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */
if (event_idx == P4_EVENT_INSTR_COMPLETED) {
if (boot_cpu_data.x86_model != 3 &&
boot_cpu_data.x86_model != 4 &&
boot_cpu_data.x86_model != 6)
return false;
}
/*
* For info
* - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
*/
return true;
}
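/*
 * A user-supplied RAW config is validated in several steps: the event
 * index must fall inside the bind map, the event must exist on this
 * cpu model, shared events need proper permissions under HT, the
 * EventMask bits must be a subset of the valid ones, and the PEBS bits
 * must name a known metric with the enable bit left to the kernel.
 */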
static int p4_validate_raw_event(struct perf_event *event)
{
unsigned int v, emask;
/* User data may carry an out-of-bounds event index */
v = p4_config_unpack_event(event->attr.config);
if (v >= ARRAY_SIZE(p4_event_bind_map))
return -EINVAL;
/* It may be unsupported: */
if (!p4_event_match_cpu_model(v))
return -EINVAL;
/*
* NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
* in Architectural Performance Monitoring: it selects not
* _which_ logical cpu to count on but rather _when_, i.e. it
* depends on the logical cpu state -- count the event if one
* cpu is active, none, both or any, so we just allow the user
* to pass any desired value.
*
* In turn we always set the Tx_OS/Tx_USR bits bound to the
* logical cpu without propagating them to the other cpu
*/
/*
* if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
/* ESCR EventMask bits may be invalid */
emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
if (emask & ~p4_event_bind_map[v].escr_emask)
return -EINVAL;
/*
* it may have some invalid PEBS bits
*/
if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
return -EINVAL;
v = p4_config_unpack_metric(event->attr.config);
if (v >= ARRAY_SIZE(p4_pebs_bind_map))
return -EINVAL;
return 0;
}
static void p4_hw_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
/*
* Watchdog ticks are special on Netburst: we use the
* so-called "non-sleeping" ticks as recommended
* by Intel SDM Vol3b.
*/
WARN_ON_ONCE(wd_attr->type != PERF_TYPE_HARDWARE ||
wd_attr->config != PERF_COUNT_HW_CPU_CYCLES);
wd_attr->type = PERF_TYPE_RAW;
wd_attr->config =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3)) |
p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
P4_CCCR_COMPARE);
}
static int p4_hw_config(struct perf_event *event)
{
int cpu = get_cpu();
int rc = 0;
u32 escr, cccr;
/*
* the reason we fetch the cpu this early is that if we get
* scheduled for the first time on the same cpu we will not
* need to swap the thread-specific flags in the config (and
* will save some cpu cycles)
*/
cccr = p4_default_cccr_conf(cpu);
escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
event->attr.exclude_user);
event->hw.config = p4_config_pack_escr(escr) |
p4_config_pack_cccr(cccr);
if (p4_ht_active() && p4_ht_thread(cpu))
event->hw.config = p4_set_ht_bit(event->hw.config);
if (event->attr.type == PERF_TYPE_RAW) {
struct p4_event_bind *bind;
unsigned int esel;
/*
* Clear bits we reserve to be managed by kernel itself
* and never allowed from a user space
*/
event->attr.config &= P4_CONFIG_MASK;
rc = p4_validate_raw_event(event);
if (rc)
goto out;
/*
* Note that for RAW events we allow the user to use P4_CCCR_RESERVED
* bits since we keep additional info there (for cache events etc.)
*/
event->hw.config |= event->attr.config;
bind = p4_config_get_bind(event->attr.config);
if (!bind) {
rc = -EINVAL;
goto out;
}
esel = P4_OPCODE_ESEL(bind->opcode);
event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
}
rc = x86_setup_perfctr(event);
out:
put_cpu();
return rc;
}
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
u64 v;
/* an official way for overflow indication */
rdmsrl(hwc->config_base, v);
if (v & P4_CCCR_OVF) {
wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
return 1;
}
/*
* In some circumstances the overflow might issue an NMI but not
* set the P4_CCCR_OVF bit. Because a counter holds a negative value
* we simply check whether the high bit is set; if it is cleared it
* means the counter reached zero and continued counting before the
* real NMI signal was received:
*/
rdmsrl(hwc->event_base, v);
if (!(v & ARCH_P4_UNFLAGGED_BIT))
return 1;
return 0;
}
static void p4_pmu_disable_pebs(void)
{
/*
* FIXME
*
* Two threads are still allowed to set up the same cache
* events, so we can't simply clear the metrics until we know
* no one depends on us; we would need some kind of reference
* counter for "ReplayEvent" users.
*
* RAW events are more complex: if the user (for some reason)
* passes a cache event metric with an improper event opcode,
* it's fine from the hardware point of view but complete
* nonsense as far as the "meaning" of such an action goes.
*
* So for the moment we leave the metrics turned on forever --
* it's ok for now but needs to be revisited!
*
* (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
* (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
*/
}
static inline void p4_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
* If the event gets disabled while the counter is in an
* overflowed state we need to clear P4_CCCR_OVF, otherwise
* the interrupt gets asserted again and again
*/
(void)checking_wrmsrl(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
static void p4_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_disable_event(event);
}
p4_pmu_disable_pebs();
}
/* configuration must be valid */
static void p4_pmu_enable_pebs(u64 config)
{
struct p4_pebs_bind *bind;
unsigned int idx;
BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);
idx = p4_config_unpack_metric(config);
if (idx == P4_PEBS_METRIC__none)
return;
bind = &p4_pebs_bind_map[idx];
(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
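/*
 * Enabling an event writes three pieces of state in order: the PEBS
 * metric MSRs (if a cache metric is encoded in the config), then the
 * ESCR with the event code forced from the bind map, and finally the
 * CCCR with the ENABLE bit set and the reserved bits cleared.
 */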
static void p4_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int thread = p4_ht_config_thread(hwc->config);
u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
unsigned int idx = p4_config_unpack_event(hwc->config);
struct p4_event_bind *bind;
u64 escr_addr, cccr;
bind = &p4_event_bind_map[idx];
escr_addr = (u64)bind->escr_msr[thread];
/*
* - we don't support cascaded counters yet
* - and counter 1 is broken (erratum)
*/
WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
WARN_ON_ONCE(hwc->idx == 1);
/* we need a real Event value */
escr_conf &= ~P4_ESCR_EVENT_MASK;
escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
cccr = p4_config_unpack_cccr(hwc->config);
/*
* it could be Cache event so we need to write metrics
* into additional MSRs
*/
p4_pmu_enable_pebs(hwc->config);
(void)checking_wrmsrl(escr_addr, escr_conf);
(void)checking_wrmsrl(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
static void p4_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_enable_event(event);
}
}
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
if (!test_bit(idx, cpuc->active_mask)) {
/* catch in-flight IRQs */
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
/* it might be an unflagged overflow */
overflow = p4_pmu_clear_cccr_ovf(hwc);
val = x86_perf_event_update(event);
if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
continue;
handled += overflow;
/* event overflow for sure */
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
/*
* When dealing with the unmasking of the LVTPC on P4 perf hw, it has
* been observed that the OVF bit flag has to be cleared first _before_
* the LVTPC can be unmasked.
*
* The reason is that the NMI line will continue to be asserted while the
* OVF bit is set. This causes a second NMI to be generated if the LVTPC is
* unmasked before the OVF bit is cleared, leading to unknown NMI
* messages.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
return handled;
}
/*
* swap thread-specific fields according to the thread
* we are going to run on
*/
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
u32 escr, cccr;
/*
* we either lucky and continue on same cpu or no HT support
*/
if (!p4_should_swap_ts(hwc->config, cpu))
return;
/*
* the event is migrated from an another logical
* cpu, so we need to swap thread specific flags
*/
escr = p4_config_unpack_escr(hwc->config);
cccr = p4_config_unpack_cccr(hwc->config);
if (p4_ht_thread(cpu)) {
cccr &= ~P4_CCCR_OVF_PMI_T0;
cccr |= P4_CCCR_OVF_PMI_T1;
if (escr & P4_ESCR_T0_OS) {
escr &= ~P4_ESCR_T0_OS;
escr |= P4_ESCR_T1_OS;
}
if (escr & P4_ESCR_T0_USR) {
escr &= ~P4_ESCR_T0_USR;
escr |= P4_ESCR_T1_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config |= P4_CONFIG_HT;
} else {
cccr &= ~P4_CCCR_OVF_PMI_T1;
cccr |= P4_CCCR_OVF_PMI_T0;
if (escr & P4_ESCR_T1_OS) {
escr &= ~P4_ESCR_T1_OS;
escr |= P4_ESCR_T0_OS;
}
if (escr & P4_ESCR_T1_USR) {
escr &= ~P4_ESCR_T1_USR;
escr |= P4_ESCR_T0_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config &= ~P4_CONFIG_HT;
}
}
/*
* ESCR address hashing is tricky: ESCRs are not sequential
* in memory, but they all start from MSR_P4_BSU_ESCR0 (0x03a0)
* and lie within the address range [0x3a0, 0x3e1],
*
* so we end up with a ~70% filled hash table
*/
#define P4_ESCR_MSR_BASE 0x000003a0
#define P4_ESCR_MSR_MAX 0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr) [P4_ESCR_MSR_IDX(msr)] = msr
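/*
 * For example, MSR_P4_BSU_ESCR0 (the base address 0x3a0) maps to
 * index 0. Slots which do not correspond to a real ESCR stay zero,
 * which is how p4_get_escr_idx() below detects a bogus address.
 */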
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};
static int p4_get_escr_idx(unsigned int addr)
{
unsigned int idx = P4_ESCR_MSR_IDX(addr);
if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
!p4_escr_table[idx] ||
p4_escr_table[idx] != addr)) {
WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
return -1;
}
return idx;
}
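/*
 * p4_next_cntr walks the (at most P4_CNTR_LIMIT) counter candidates
 * bound to the event for the given thread and returns the first one
 * not yet claimed in used_mask, or -1 if all of them are taken.
 */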
static int p4_next_cntr(int thread, unsigned long *used_mask,
struct p4_event_bind *bind)
{
int i, j;
for (i = 0; i < P4_CNTR_LIMIT; i++) {
j = bind->cntr[thread][i];
if (j != -1 && !test_bit(j, used_mask))
return j;
}
return -1;
}
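/*
 * Scheduling on P4 needs two resources per event: a free counter from
 * the bind's candidate list and a free ESCR. An event that already
 * owns a counter on this cpu keeps it; otherwise a free counter is
 * picked and the thread-specific flags are swapped to match the
 * current thread before the assignment is recorded.
 */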
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
int cpu = smp_processor_id();
struct hw_perf_event *hwc;
struct p4_event_bind *bind;
unsigned int i, thread, num;
int cntr_idx, escr_idx;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
for (i = 0, num = n; i < n; i++, num--) {
hwc = &cpuc->event_list[i]->hw;
thread = p4_ht_thread(cpu);
bind = p4_config_get_bind(hwc->config);
escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
if (unlikely(escr_idx == -1))
goto done;
if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
cntr_idx = hwc->idx;
if (assign)
assign[i] = hwc->idx;
goto reserve;
}
cntr_idx = p4_next_cntr(thread, used_mask, bind);
if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
goto done;
p4_pmu_swap_config_ts(hwc, cpu);
if (assign)
assign[i] = cntr_idx;
reserve:
set_bit(cntr_idx, used_mask);
set_bit(escr_idx, escr_mask);
}
done:
return num ? -ENOSPC : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
.name = "Netburst P4/Xeon",
.handle_irq = p4_pmu_handle_irq,
.disable_all = p4_pmu_disable_all,
.enable_all = p4_pmu_enable_all,
.enable = p4_pmu_enable_event,
.disable = p4_pmu_disable_event,
.eventsel = MSR_P4_BPU_CCCR0,
.perfctr = MSR_P4_BPU_PERFCTR0,
.event_map = p4_pmu_event_map,
.max_events = ARRAY_SIZE(p4_general_events),
.get_event_constraints = x86_get_event_constraints,
/*
* If HT is disabled we may need to use all
* ARCH_P4_MAX_CCCR counters simultaneously,
* though we leave it restricted for the moment
* assuming HT is on
*/
.num_counters = ARCH_P4_MAX_CCCR,
.apic = 1,
.cntval_bits = ARCH_P4_CNTRVAL_BITS,
.cntval_mask = ARCH_P4_CNTRVAL_MASK,
.max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
.hw_watchdog_set_attr = p4_hw_watchdog_set_attr,
.hw_config = p4_hw_config,
.schedule_events = p4_pmu_schedule_events,
/*
* This handles erratum N15 in Intel doc 249199-029:
* the counter may not be updated correctly on write,
* so we need a second write operation to do the trick
* (the official workaround didn't work)
*
* this idea is taken from the OProfile code
*/
.perfctr_second_write = 1,
};
static __init int p4_pmu_init(void)
{
unsigned int low, high;
/* If we get stripped -- indexing fails */
BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
rdmsr(MSR_IA32_MISC_ENABLE, low, high);
if (!(low & (1 << 7))) {
pr_cont("unsupported Netburst CPU model %d ",
boot_cpu_data.x86_model);
return -ENODEV;
}
memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
pr_cont("Netburst events, ");
x86_pmu = p4_pmu;
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
| /*
* Netburst Performance Events (P4, old Xeon)
*
* Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
* Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
*
* For licensing details see kernel-base/COPYING
*/
#ifdef CONFIG_CPU_SUP_INTEL
#include <asm/perf_event_p4.h>
#define P4_CNTR_LIMIT 3
/*
* array indices: 0,1 -- HT threads, used on an HT-enabled cpu
*/
struct p4_event_bind {
unsigned int opcode; /* Event code and ESCR selector */
unsigned int escr_msr[2]; /* ESCR MSR for this event */
unsigned int escr_emask; /* valid ESCR EventMask bits */
unsigned int shared; /* event is shared across threads */
char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
};
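/*
 * A bind entry is consumed as follows: escr_msr[] is indexed by the
 * HT thread the event runs on, cntr[] lists the per-thread counter
 * candidates, and the ESEL part of the opcode is merged into the
 * CCCR when the event is configured.
 */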
struct p4_pebs_bind {
unsigned int metric_pebs;
unsigned int metric_vert;
};
/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert) \
[P4_PEBS_METRIC__##name] = { \
.metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG, \
.metric_vert = vert, \
}
/*
* note we have P4_PEBS_ENABLE_UOP_TAG always set here
*
* it's needed for mapping the P4_PEBS_CONFIG_METRIC_MASK bits of
* the event configuration to find out which values are to be
* written into the MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
* registers
*/
static struct p4_pebs_bind p4_pebs_bind_map[] = {
P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired, 0x0000002, 0x0000001),
P4_GEN_PEBS_BIND(dtlb_load_miss_retired, 0x0000004, 0x0000001),
P4_GEN_PEBS_BIND(dtlb_store_miss_retired, 0x0000004, 0x0000002),
P4_GEN_PEBS_BIND(dtlb_all_miss_retired, 0x0000004, 0x0000003),
P4_GEN_PEBS_BIND(tagged_mispred_branch, 0x0018000, 0x0000010),
P4_GEN_PEBS_BIND(mob_load_replay_retired, 0x0000200, 0x0000001),
P4_GEN_PEBS_BIND(split_load_retired, 0x0000400, 0x0000001),
P4_GEN_PEBS_BIND(split_store_retired, 0x0000400, 0x0000002),
};
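/*
 * For example, selecting the 1stl_cache_load_miss_retired metric makes
 * p4_pmu_enable_pebs() write 0x1 | P4_PEBS_ENABLE_UOP_TAG into
 * MSR_IA32_PEBS_ENABLE and 0x1 into MSR_P4_PEBS_MATRIX_VERT.
 */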
/*
* Note that we don't use CCCR1 here; there is an
* exception for P4_BSQ_ALLOCATION but we simply have
* no workaround for it.
*
* Consider this binding as the resources a particular
* event may borrow; it doesn't contain the EventMask,
* Tags and friends -- they are left to the caller
*/
static struct p4_event_bind p4_event_bind_map[] = {
[P4_EVENT_TC_DELIVER_MODE] = {
.opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) |
P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID),
.shared = 1,
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_BPU_FETCH_REQUEST] = {
.opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
.escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_ITLB_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
.escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) |
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_MEMORY_CANCEL] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) |
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MEMORY_COMPLETE] = {
.opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) |
P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_LOAD_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_STORE_PORT_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
.escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST),
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_MOB_LOAD_REPLAY] = {
.opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
.escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) |
P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_PAGE_WALK_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
.escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS),
.shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_CACHE_REFERENCE] = {
.opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ALLOCATION] = {
.opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_FSB_DATA_ACTIVITY] = {
.opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER),
.shared = 1,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
.escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2),
.cntr = { {0, -1, -1}, {1, -1, -1} },
},
[P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */
.opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
.escr_msr = { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2),
.cntr = { {2, -1, -1}, {3, -1, -1} },
},
[P4_EVENT_SSE_INPUT_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_PACKED_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_SP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_SCALAR_DP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_64BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_128BIT_MMX_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_X87_FP_UOP] = {
.opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP),
.escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_TC_MISC] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MISC),
.escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_GLOBAL_POWER_EVENTS] = {
.opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING),
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_TC_MS_XFER] = {
.opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_UOP_QUEUE_WRITES] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
.escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RETIRED_BRANCH_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
.escr_msr = { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT),
.cntr = { {4, 5, -1}, {6, 7, -1} },
},
[P4_EVENT_RESOURCE_STALL] = {
.opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL),
.escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_WC_BUFFER] = {
.opcode = P4_OPCODE(P4_EVENT_WC_BUFFER),
.escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) |
P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS),
.shared = 1,
.cntr = { {8, 9, -1}, {10, 11, -1} },
},
[P4_EVENT_B2B_CYCLES] = {
.opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_BNR] = {
.opcode = P4_OPCODE(P4_EVENT_BNR),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_SNOOP] = {
.opcode = P4_OPCODE(P4_EVENT_SNOOP),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_RESPONSE] = {
.opcode = P4_OPCODE(P4_EVENT_RESPONSE),
.escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
.escr_emask = 0,
.cntr = { {0, -1, -1}, {2, -1, -1} },
},
[P4_EVENT_FRONT_END_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_EXECUTION_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_REPLAY_EVENT] = {
.opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOPS_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_UOP_TYPE] = {
.opcode = P4_OPCODE(P4_EVENT_UOP_TYPE),
.escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) |
P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) |
P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
.opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_X87_ASSIST] = {
.opcode = P4_OPCODE(P4_EVENT_X87_ASSIST),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) |
P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_MACHINE_CLEAR] = {
.opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
.escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) |
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) |
P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
[P4_EVENT_INSTR_COMPLETED] = {
.opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
.escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
.escr_emask =
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS),
.cntr = { {12, 13, 16}, {14, 15, 17} },
},
};
#define P4_GEN_CACHE_EVENT(event, bit, metric) \
p4_config_pack_escr(P4_ESCR_EVENT(event) | \
P4_ESCR_EMASK_BIT(event, bit)) | \
p4_config_pack_cccr(metric | \
P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
static __initconst const u64 p4_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__1stl_cache_load_miss_retired),
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__dtlb_load_miss_retired),
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
P4_PEBS_METRIC__dtlb_store_miss_retired),
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
P4_PEBS_METRIC__none),
[ C(RESULT_MISS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
P4_PEBS_METRIC__none),
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
/* non-halted CPU clocks */
[PERF_COUNT_HW_CPU_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) |
P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),
/*
* retired instructions
* for the sake of simplicity we don't use the FSB tagging
*/
[PERF_COUNT_HW_INSTRUCTIONS] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) |
P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),
/* cache hits */
[PERF_COUNT_HW_CACHE_REFERENCES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),
/* cache misses */
[PERF_COUNT_HW_CACHE_MISSES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) |
P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),
/* branch instructions retired */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) |
P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),
/* mispredicted branches retired */
[PERF_COUNT_HW_BRANCH_MISSES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED) |
P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),
/* bus ready clocks (cpu is driving #DRDY_DRV/#DRDY_OWN): */
[PERF_COUNT_HW_BUS_CYCLES] =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) |
P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN)) |
p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
unsigned int evnt = p4_config_unpack_event(config);
struct p4_event_bind *bind = NULL;
if (evnt < ARRAY_SIZE(p4_event_bind_map))
bind = &p4_event_bind_map[evnt];
return bind;
}
static u64 p4_pmu_event_map(int hw_event)
{
struct p4_event_bind *bind;
unsigned int esel;
u64 config;
config = p4_general_events[hw_event];
bind = p4_config_get_bind(config);
esel = P4_OPCODE_ESEL(bind->opcode);
config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
return config;
}
/* check cpu model specifics */
static bool p4_event_match_cpu_model(unsigned int event_idx)
{
/* the INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */
if (event_idx == P4_EVENT_INSTR_COMPLETED) {
if (boot_cpu_data.x86_model != 3 &&
boot_cpu_data.x86_model != 4 &&
boot_cpu_data.x86_model != 6)
return false;
}
/*
* For info
* - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2
*/
return true;
}
static int p4_validate_raw_event(struct perf_event *event)
{
unsigned int v, emask;
/* User data may carry an out-of-bounds event index */
v = p4_config_unpack_event(event->attr.config);
if (v >= ARRAY_SIZE(p4_event_bind_map))
return -EINVAL;
/* It may be unsupported: */
if (!p4_event_match_cpu_model(v))
return -EINVAL;
/*
* NOTE: P4_CCCR_THREAD_ANY does not have the same meaning as
* in Architectural Performance Monitoring: it selects not
* _which_ logical cpu to count on but rather _when_, i.e. it
* depends on the logical cpu state -- count the event if one
* cpu is active, none, both or any, so we just allow the user
* to pass any desired value.
*
* In turn we always set the Tx_OS/Tx_USR bits bound to the
* logical cpu without propagating them to the other cpu
*/
/*
* if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
/* ESCR EventMask bits may be invalid */
emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK;
if (emask & ~p4_event_bind_map[v].escr_emask)
return -EINVAL;
/*
* it may have some invalid PEBS bits
*/
if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE))
return -EINVAL;
v = p4_config_unpack_metric(event->attr.config);
if (v >= ARRAY_SIZE(p4_pebs_bind_map))
return -EINVAL;
return 0;
}
static void p4_hw_watchdog_set_attr(struct perf_event_attr *wd_attr)
{
/*
* Watchdog ticks are special on Netburst: we use the
* so-called "non-sleeping" ticks as recommended
* by Intel SDM Vol3b.
*/
WARN_ON_ONCE(wd_attr->type != PERF_TYPE_HARDWARE ||
wd_attr->config != PERF_COUNT_HW_CPU_CYCLES);
wd_attr->type = PERF_TYPE_RAW;
wd_attr->config =
p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) |
P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3)) |
p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
P4_CCCR_COMPARE);
}
static int p4_hw_config(struct perf_event *event)
{
int cpu = get_cpu();
int rc = 0;
u32 escr, cccr;
/*
* the reason we read the cpu this early is that if we get scheduled
* for the first time on the same cpu, we will not need to swap the
* thread-specific flags in the config (and will save some cpu cycles)
*/
cccr = p4_default_cccr_conf(cpu);
escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
event->attr.exclude_user);
event->hw.config = p4_config_pack_escr(escr) |
p4_config_pack_cccr(cccr);
if (p4_ht_active() && p4_ht_thread(cpu))
event->hw.config = p4_set_ht_bit(event->hw.config);
if (event->attr.type == PERF_TYPE_RAW) {
struct p4_event_bind *bind;
unsigned int esel;
/*
* Clear bits we reserve to be managed by the kernel itself
* and never allow from user space
*/
event->attr.config &= P4_CONFIG_MASK;
rc = p4_validate_raw_event(event);
if (rc)
goto out;
/*
* Note that for RAW events we allow the user to use P4_CCCR_RESERVED
* bits since we keep additional info there (for cache events etc.)
*/
event->hw.config |= event->attr.config;
bind = p4_config_get_bind(event->attr.config);
if (!bind) {
rc = -EINVAL;
goto out;
}
esel = P4_OPCODE_ESEL(bind->opcode);
event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
}
rc = x86_setup_perfctr(event);
out:
put_cpu();
return rc;
}
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
u64 v;
/* an official way for overflow indication */
rdmsrl(hwc->config_base, v);
if (v & P4_CCCR_OVF) {
wrmsrl(hwc->config_base, v & ~P4_CCCR_OVF);
return 1;
}
/*
* In some circumstances the overflow might issue an NMI without
* setting the P4_CCCR_OVF bit. Because a counter holds a negative
* value we simply check for the high bit being set; if it is
* cleared, the counter has reached zero and continued counting
* before the real NMI signal was received:
*/
rdmsrl(hwc->event_base, v);
if (!(v & ARCH_P4_UNFLAGGED_BIT))
return 1;
return 0;
}
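/*
 * Worked example of the unflagged-overflow test above (an
 * illustrative note, not from the original file): counters are
 * programmed with a negative value (-left), so while the counter
 * is armed its top bit (ARCH_P4_UNFLAGGED_BIT) reads as set; once
 * it crosses zero and keeps counting upward the top bit reads as
 * clear, which we treat as an overflow even without P4_CCCR_OVF.
 */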
static void p4_pmu_disable_pebs(void)
{
/*
* FIXME
*
* Two threads are still allowed to set up the same cache
* events, so we can't simply clear the metrics until we know
* no one depends on us; we need some kind of counter
* for "ReplayEvent" users.
*
* What is more complex -- RAW events: if the user (for some
* reason) passes some cache event metric with an improper
* event opcode -- it's fine from the hardware point of view
* but complete nonsense as far as the "meaning" of such an
* action goes.
*
* So for the moment let's leave the metrics turned on forever --
* it's ok for now but needs to be revisited!
*
* (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
* (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
*/
}
static inline void p4_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
* If the event gets disabled while the counter is in an overflowed
* state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
* asserted again and again
*/
(void)checking_wrmsrl(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
static void p4_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_disable_event(event);
}
p4_pmu_disable_pebs();
}
/* configuration must be valid */
static void p4_pmu_enable_pebs(u64 config)
{
struct p4_pebs_bind *bind;
unsigned int idx;
BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);
idx = p4_config_unpack_metric(config);
if (idx == P4_PEBS_METRIC__none)
return;
bind = &p4_pebs_bind_map[idx];
(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
static void p4_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int thread = p4_ht_config_thread(hwc->config);
u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
unsigned int idx = p4_config_unpack_event(hwc->config);
struct p4_event_bind *bind;
u64 escr_addr, cccr;
bind = &p4_event_bind_map[idx];
escr_addr = (u64)bind->escr_msr[thread];
/*
* - we don't support cascaded counters yet
* - and counter 1 is broken (erratum)
*/
WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
WARN_ON_ONCE(hwc->idx == 1);
/* we need a real Event value */
escr_conf &= ~P4_ESCR_EVENT_MASK;
escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));
cccr = p4_config_unpack_cccr(hwc->config);
/*
* it could be a cache event, so we need to write the metrics
* into additional MSRs
*/
p4_pmu_enable_pebs(hwc->config);
(void)checking_wrmsrl(escr_addr, escr_conf);
(void)checking_wrmsrl(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
static void p4_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
p4_pmu_enable_event(event);
}
}
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
if (!test_bit(idx, cpuc->active_mask)) {
/* catch in-flight IRQs */
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
/* it might be an unflagged overflow */
overflow = p4_pmu_clear_cccr_ovf(hwc);
val = x86_perf_event_update(event);
if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
continue;
handled += overflow;
/* event overflow for sure */
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
if (handled)
inc_irq_stat(apic_perf_irqs);
/*
* When dealing with the unmasking of the LVTPC on P4 perf hw, it has
* been observed that the OVF bit flag has to be cleared first _before_
* the LVTPC can be unmasked.
*
* The reason is the NMI line will continue to be asserted while the OVF
* bit is set. This causes a second NMI to be generated if the LVTPC is
* unmasked before the OVF bit is cleared, leading to unknown NMI
* messages.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
return handled;
}
/*
* swap thread specific fields according to a thread
* we are going to run on
*/
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
u32 escr, cccr;
/*
* either we are lucky and continue on the same cpu, or there is no HT support
*/
if (!p4_should_swap_ts(hwc->config, cpu))
return;
/*
* the event has migrated from another logical
* cpu, so we need to swap the thread-specific flags
*/
escr = p4_config_unpack_escr(hwc->config);
cccr = p4_config_unpack_cccr(hwc->config);
if (p4_ht_thread(cpu)) {
cccr &= ~P4_CCCR_OVF_PMI_T0;
cccr |= P4_CCCR_OVF_PMI_T1;
if (escr & P4_ESCR_T0_OS) {
escr &= ~P4_ESCR_T0_OS;
escr |= P4_ESCR_T1_OS;
}
if (escr & P4_ESCR_T0_USR) {
escr &= ~P4_ESCR_T0_USR;
escr |= P4_ESCR_T1_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config |= P4_CONFIG_HT;
} else {
cccr &= ~P4_CCCR_OVF_PMI_T1;
cccr |= P4_CCCR_OVF_PMI_T0;
if (escr & P4_ESCR_T1_OS) {
escr &= ~P4_ESCR_T1_OS;
escr |= P4_ESCR_T0_OS;
}
if (escr & P4_ESCR_T1_USR) {
escr &= ~P4_ESCR_T1_USR;
escr |= P4_ESCR_T0_USR;
}
hwc->config = p4_config_pack_escr(escr);
hwc->config |= p4_config_pack_cccr(cccr);
hwc->config &= ~P4_CONFIG_HT;
}
}
/*
* ESCR address hashing is tricky: ESCRs are not sequential
* in memory, but they all start from MSR_P4_BSU_ESCR0 (0x03a0)
* and the low byte of any ESCR address lies in the range
* [0xa0,0xe1],
*
* so we end up with a ~70% filled hash table
*/
#define P4_ESCR_MSR_BASE 0x000003a0
#define P4_ESCR_MSR_MAX 0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE (P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr) (msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr) [P4_ESCR_MSR_IDX(msr)] = msr
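/*
 * Worked example of the index scheme above (an illustrative note,
 * not from the original file): MSR_P4_BSU_ESCR0 is 0x3a0, so
 * P4_ESCR_MSR_IDX(0x3a0) == 0 and P4_ESCR_MSR_IDX(0x3e1) == 0x41,
 * giving P4_ESCR_MSR_TABLE_SIZE == 0x42 (66) slots, of which only
 * the 46 ESCRs listed below are populated -- hence the ~70% fill.
 */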
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};
static int p4_get_escr_idx(unsigned int addr)
{
unsigned int idx = P4_ESCR_MSR_IDX(addr);
if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
!p4_escr_table[idx] ||
p4_escr_table[idx] != addr)) {
WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
return -1;
}
return idx;
}
static int p4_next_cntr(int thread, unsigned long *used_mask,
struct p4_event_bind *bind)
{
int i, j;
for (i = 0; i < P4_CNTR_LIMIT; i++) {
j = bind->cntr[thread][i];
if (j != -1 && !test_bit(j, used_mask))
return j;
}
return -1;
}
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
int cpu = smp_processor_id();
struct hw_perf_event *hwc;
struct p4_event_bind *bind;
unsigned int i, thread, num;
int cntr_idx, escr_idx;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);
for (i = 0, num = n; i < n; i++, num--) {
hwc = &cpuc->event_list[i]->hw;
thread = p4_ht_thread(cpu);
bind = p4_config_get_bind(hwc->config);
escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
if (unlikely(escr_idx == -1))
goto done;
if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
cntr_idx = hwc->idx;
if (assign)
assign[i] = hwc->idx;
goto reserve;
}
cntr_idx = p4_next_cntr(thread, used_mask, bind);
if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
goto done;
p4_pmu_swap_config_ts(hwc, cpu);
if (assign)
assign[i] = cntr_idx;
reserve:
set_bit(cntr_idx, used_mask);
set_bit(escr_idx, escr_mask);
}
done:
return num ? -ENOSPC : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
.name = "Netburst P4/Xeon",
.handle_irq = p4_pmu_handle_irq,
.disable_all = p4_pmu_disable_all,
.enable_all = p4_pmu_enable_all,
.enable = p4_pmu_enable_event,
.disable = p4_pmu_disable_event,
.eventsel = MSR_P4_BPU_CCCR0,
.perfctr = MSR_P4_BPU_PERFCTR0,
.event_map = p4_pmu_event_map,
.max_events = ARRAY_SIZE(p4_general_events),
.get_event_constraints = x86_get_event_constraints,
/*
* If HT is disabled we may need to use all
* ARCH_P4_MAX_CCCR counters simultaneously,
* though we leave it restricted for the moment
* assuming HT is on
*/
.num_counters = ARCH_P4_MAX_CCCR,
.apic = 1,
.cntval_bits = ARCH_P4_CNTRVAL_BITS,
.cntval_mask = ARCH_P4_CNTRVAL_MASK,
.max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
.hw_watchdog_set_attr = p4_hw_watchdog_set_attr,
.hw_config = p4_hw_config,
.schedule_events = p4_pmu_schedule_events,
/*
* This handles erratum N15 in Intel doc 249199-029:
* the counter may not be updated correctly on write,
* so we need a second write operation to do the trick
* (the official workaround didn't work)
*
* the idea is taken from the OProfile code
*/
.perfctr_second_write = 1,
};
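/*
 * Sketch of how the generic x86 perf code consumes the
 * perfctr_second_write flag set above -- an illustrative
 * reconstruction of the x86_perf_event_set_period() behaviour
 * assumed here, not text from this file:
 *
 *	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
 *	if (x86_pmu.perfctr_second_write)
 *		wrmsrl(hwc->event_base,
 *		       (u64)(-left) & x86_pmu.cntval_mask);
 */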
static __init int p4_pmu_init(void)
{
unsigned int low, high;
/* If we get stripped -- indexing fails */
BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
rdmsr(MSR_IA32_MISC_ENABLE, low, high);
if (!(low & (1 << 7))) {
pr_cont("unsupported Netburst CPU model %d ",
boot_cpu_data.x86_model);
return -ENODEV;
}
memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
pr_cont("Netburst events, ");
x86_pmu = p4_pmu;
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */
|
270,295,179,513,182 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*/
/*
* Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
* Copyright (C) 2000-2001 VERITAS Software Corporation.
* Copyright (C) 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
* Copyright (C) 2007 MontaVista Software, Inc.
* Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
*/
/****************************************************************************
* Contributor: Lake Stevens Instrument Division$
* Written by: Glenn Engel $
* Updated by: Amit Kale<akale@veritas.com>
* Updated by: Tom Rini <trini@kernel.crashing.org>
* Updated by: Jason Wessel <jason.wessel@windriver.com>
* Modified for 386 by Jim Kingdon, Cygnus Support.
* Original kgdb, compatibility with 2.1.xx kernel by
* David Grothe <dave@gcom.com>
* Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
* X86_64 changes from Andi Kleen's patch merged by Jim Houston
*/
#include <linux/spinlock.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>
#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/nmi.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
{ "ax", 4, offsetof(struct pt_regs, ax) },
{ "cx", 4, offsetof(struct pt_regs, cx) },
{ "dx", 4, offsetof(struct pt_regs, dx) },
{ "bx", 4, offsetof(struct pt_regs, bx) },
{ "sp", 4, offsetof(struct pt_regs, sp) },
{ "bp", 4, offsetof(struct pt_regs, bp) },
{ "si", 4, offsetof(struct pt_regs, si) },
{ "di", 4, offsetof(struct pt_regs, di) },
{ "ip", 4, offsetof(struct pt_regs, ip) },
{ "flags", 4, offsetof(struct pt_regs, flags) },
{ "cs", 4, offsetof(struct pt_regs, cs) },
{ "ss", 4, offsetof(struct pt_regs, ss) },
{ "ds", 4, offsetof(struct pt_regs, ds) },
{ "es", 4, offsetof(struct pt_regs, es) },
{ "fs", 4, -1 },
{ "gs", 4, -1 },
#else
{ "ax", 8, offsetof(struct pt_regs, ax) },
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, dx) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },
{ "r8", 8, offsetof(struct pt_regs, r8) },
{ "r9", 8, offsetof(struct pt_regs, r9) },
{ "r10", 8, offsetof(struct pt_regs, r10) },
{ "r11", 8, offsetof(struct pt_regs, r11) },
{ "r12", 8, offsetof(struct pt_regs, r12) },
{ "r13", 8, offsetof(struct pt_regs, r13) },
{ "r14", 8, offsetof(struct pt_regs, r14) },
{ "r15", 8, offsetof(struct pt_regs, r15) },
{ "ip", 8, offsetof(struct pt_regs, ip) },
{ "flags", 4, offsetof(struct pt_regs, flags) },
{ "cs", 4, offsetof(struct pt_regs, cs) },
{ "ss", 4, offsetof(struct pt_regs, ss) },
#endif
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (
#ifdef CONFIG_X86_32
regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
#endif
regno == GDB_SP || regno == GDB_ORIG_AX)
return 0;
if (dbg_reg_def[regno].offset != -1)
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno == GDB_ORIG_AX) {
memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
return "orig_ax";
}
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
#ifdef CONFIG_X86_32
switch (regno) {
case GDB_SS:
if (!user_mode_vm(regs))
*(unsigned long *)mem = __KERNEL_DS;
break;
case GDB_SP:
if (!user_mode_vm(regs))
*(unsigned long *)mem = kernel_stack_pointer(regs);
break;
case GDB_GS:
case GDB_FS:
*(unsigned long *)mem = 0xFFFF;
break;
}
#endif
return dbg_reg_def[regno].name;
}
/**
* sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
* @p: The &struct task_struct of the desired process.
*
* Convert the register values of the sleeping process in @p to
* the format that GDB expects.
* This function is called when kgdb does not have access to the
* &struct pt_regs and therefore it should fill the gdb registers
* @gdb_regs with what has been saved in &struct thread_struct
* thread field during switch_to.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
gdb_regs[GDB_AX] = 0;
gdb_regs[GDB_BX] = 0;
gdb_regs[GDB_CX] = 0;
gdb_regs[GDB_DX] = 0;
gdb_regs[GDB_SI] = 0;
gdb_regs[GDB_DI] = 0;
gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
gdb_regs[GDB_DS] = __KERNEL_DS;
gdb_regs[GDB_ES] = __KERNEL_DS;
gdb_regs[GDB_PS] = 0;
gdb_regs[GDB_CS] = __KERNEL_CS;
gdb_regs[GDB_PC] = p->thread.ip;
gdb_regs[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_FS] = 0xFFFF;
gdb_regs[GDB_GS] = 0xFFFF;
#else
gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
gdb_regs32[GDB_CS] = __KERNEL_CS;
gdb_regs32[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_PC] = 0;
gdb_regs[GDB_R8] = 0;
gdb_regs[GDB_R9] = 0;
gdb_regs[GDB_R10] = 0;
gdb_regs[GDB_R11] = 0;
gdb_regs[GDB_R12] = 0;
gdb_regs[GDB_R13] = 0;
gdb_regs[GDB_R14] = 0;
gdb_regs[GDB_R15] = 0;
#endif
gdb_regs[GDB_SP] = p->thread.sp;
}
static struct hw_breakpoint {
unsigned enabled;
unsigned long addr;
int len;
int type;
struct perf_event * __percpu *pev;
} breakinfo[HBP_NUM];
static unsigned long early_dr7;
static void kgdb_correct_hw_break(void)
{
int breakno;
for (breakno = 0; breakno < HBP_NUM; breakno++) {
struct perf_event *bp;
struct arch_hw_breakpoint *info;
int val;
int cpu = raw_smp_processor_id();
if (!breakinfo[breakno].enabled)
continue;
if (dbg_is_early) {
set_debugreg(breakinfo[breakno].addr, breakno);
early_dr7 |= encode_dr7(breakno,
breakinfo[breakno].len,
breakinfo[breakno].type);
set_debugreg(early_dr7, 7);
continue;
}
bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
info = counter_arch_bp(bp);
if (bp->attr.disabled != 1)
continue;
bp->attr.bp_addr = breakinfo[breakno].addr;
bp->attr.bp_len = breakinfo[breakno].len;
bp->attr.bp_type = breakinfo[breakno].type;
info->address = breakinfo[breakno].addr;
info->len = breakinfo[breakno].len;
info->type = breakinfo[breakno].type;
val = arch_install_hw_breakpoint(bp);
if (!val)
bp->attr.disabled = 0;
}
if (!dbg_is_early)
hw_breakpoint_restore();
}
static int hw_break_reserve_slot(int breakno)
{
int cpu;
int cnt = 0;
struct perf_event **pevent;
if (dbg_is_early)
return 0;
for_each_online_cpu(cpu) {
cnt++;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_reserve_bp_slot(*pevent))
goto fail;
}
return 0;
fail:
for_each_online_cpu(cpu) {
cnt--;
if (!cnt)
break;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
dbg_release_bp_slot(*pevent);
}
return -1;
}
static int hw_break_release_slot(int breakno)
{
struct perf_event **pevent;
int cpu;
if (dbg_is_early)
return 0;
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
/*
* The debugger is responsible for handling the retry on
* remove failure.
*/
return -1;
}
return 0;
}
static int
kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < HBP_NUM; i++)
if (breakinfo[i].addr == addr && breakinfo[i].enabled)
break;
if (i == HBP_NUM)
return -1;
if (hw_break_release_slot(i)) {
printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
return -1;
}
breakinfo[i].enabled = 0;
return 0;
}
static void kgdb_remove_all_hw_break(void)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
for (i = 0; i < HBP_NUM; i++) {
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (!bp->attr.disabled) {
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
continue;
}
if (dbg_is_early)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
else if (hw_break_release_slot(i))
printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
breakinfo[i].addr);
breakinfo[i].enabled = 0;
}
}
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < HBP_NUM; i++)
if (!breakinfo[i].enabled)
break;
if (i == HBP_NUM)
return -1;
switch (bptype) {
case BP_HARDWARE_BREAKPOINT:
len = 1;
breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
break;
case BP_WRITE_WATCHPOINT:
breakinfo[i].type = X86_BREAKPOINT_WRITE;
break;
case BP_ACCESS_WATCHPOINT:
breakinfo[i].type = X86_BREAKPOINT_RW;
break;
default:
return -1;
}
switch (len) {
case 1:
breakinfo[i].len = X86_BREAKPOINT_LEN_1;
break;
case 2:
breakinfo[i].len = X86_BREAKPOINT_LEN_2;
break;
case 4:
breakinfo[i].len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case 8:
breakinfo[i].len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
return -1;
}
breakinfo[i].addr = addr;
if (hw_break_reserve_slot(i)) {
breakinfo[i].addr = 0;
return -1;
}
breakinfo[i].enabled = 1;
return 0;
}
/**
* kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
* @regs: Current &struct pt_regs.
*
* This function will be called if the particular architecture must
* disable hardware debugging while it is processing gdb packets or
* handling exception.
*/
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
/* Disable hardware debugging while we are in kgdb: */
set_debugreg(0UL, 7);
for (i = 0; i < HBP_NUM; i++) {
if (!breakinfo[i].enabled)
continue;
if (dbg_is_early) {
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
continue;
}
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
}
}
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in the case of MIPS, smp_call_function() is used to round up CPUs. In
* this case, we have to make sure that interrupts are enabled before
* calling smp_call_function(). The argument to this function is
* the flags that will be used when restoring the interrupts. There is a
* local_irq_save() call before kgdb_roundup_cpus().
*
* On non-SMP systems, this is not called.
*/
void kgdb_roundup_cpus(unsigned long flags)
{
apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*/
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
case 's':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
case 'D':
case 'k':
/* clear the trace bit */
linux_regs->flags &= ~X86_EFLAGS_TF;
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
if (remcomInBuffer[0] == 's') {
linux_regs->flags |= X86_EFLAGS_TF;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
return 0;
}
/* this means that we do not want to exit from the handler: */
return -1;
}
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
/*
* Single step exception from kernel space to user space, so
* eat the exception and continue the process:
*/
printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
"resuming...\n");
kgdb_arch_handle_exception(args->trapnr, args->signr,
args->err, "c", "", regs);
/*
* Reset the BS bit in dr6 (pointed to by args->err) to
* denote completion of processing
*/
(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
return NOTIFY_STOP;
}
static int was_in_debug_nmi[NR_CPUS];
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_NMI:
if (atomic_read(&kgdb_active) != -1) {
/* KGDB CPU roundup */
kgdb_nmicallback(raw_smp_processor_id(), regs);
was_in_debug_nmi[raw_smp_processor_id()] = 1;
touch_nmi_watchdog();
return NOTIFY_STOP;
}
return NOTIFY_DONE;
case DIE_NMIUNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
return NOTIFY_STOP;
}
return NOTIFY_DONE;
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
return single_step_cont(regs, args);
break;
} else if (test_thread_flag(TIF_SINGLESTEP))
/* This means a user thread is single stepping
* a system call which should be ignored
*/
return NOTIFY_DONE;
/* fall through */
default:
if (user_mode(regs))
return NOTIFY_DONE;
}
if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
/* Must touch watchdog before return to normal operation */
touch_nmi_watchdog();
return NOTIFY_STOP;
}
int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
return __kgdb_notify(&args, cmd);
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = NMI_LOCAL_LOW_PRIOR,
};
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct task_struct *tsk = current;
int i;
for (i = 0; i < 4; i++)
if (breakinfo[i].enabled)
tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}
void kgdb_arch_late(void)
{
int i, cpu;
struct perf_event_attr attr;
struct perf_event **pevent;
/*
* Pre-allocate the hw breakpoint structures in the non-atomic
* portion of kgdb because this operation requires mutexes to
* complete.
*/
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
for (i = 0; i < HBP_NUM; i++) {
if (breakinfo[i].pev)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
printk(KERN_ERR "kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
kgdb_arch_exit();
return;
}
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
pevent[0]->hw.sample_period = 1;
pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
if (pevent[0]->destroy != NULL) {
pevent[0]->destroy = NULL;
release_bp_slot(*pevent);
}
}
}
}
/**
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
int i;
for (i = 0; i < 4; i++) {
if (breakinfo[i].pev) {
unregister_wide_hw_breakpoint(breakinfo[i].pev);
breakinfo[i].pev = NULL;
}
}
unregister_die_notifier(&kgdb_notifier);
}
/**
*
* kgdb_skipexception - Bail out of KGDB when we've been triggered.
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
*
* On some architectures we need to skip a breakpoint exception when
* it occurs after a breakpoint has been removed.
*
* Skip an int3 exception when it occurs after a breakpoint has been
* removed. Backtrack eip by 1 since the int3 would have caused it to
* increment by 1.
*/
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
regs->ip -= 1;
return 1;
}
return 0;
}
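/*
 * Worked example (illustrative, not from the original file): int3
 * is the single byte 0xcc, so after the trap regs->ip points one
 * byte past the breakpoint address; if that breakpoint was already
 * removed we rewind ip by 1 and resume as if nothing happened.
 */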
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
if (exception == 3)
return instruction_pointer(regs) - 1;
return instruction_pointer(regs);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
.flags = KGDB_HW_BREAKPOINT,
.set_hw_breakpoint = kgdb_set_hw_break,
.remove_hw_breakpoint = kgdb_remove_hw_break,
.disable_hw_break = kgdb_disable_hw_debug,
.remove_all_hw_break = kgdb_remove_all_hw_break,
.correct_hw_break = kgdb_correct_hw_break,
};
| /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*/
/*
* Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
* Copyright (C) 2000-2001 VERITAS Software Corporation.
* Copyright (C) 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
* Copyright (C) 2007 MontaVista Software, Inc.
* Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
*/
/****************************************************************************
* Contributor: Lake Stevens Instrument Division$
* Written by: Glenn Engel $
* Updated by: Amit Kale<akale@veritas.com>
* Updated by: Tom Rini <trini@kernel.crashing.org>
* Updated by: Jason Wessel <jason.wessel@windriver.com>
* Modified for 386 by Jim Kingdon, Cygnus Support.
* Original kgdb, compatibility with 2.1.xx kernel by
* David Grothe <dave@gcom.com>
* Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
* X86_64 changes from Andi Kleen's patch merged by Jim Houston
*/
#include <linux/spinlock.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>
#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/nmi.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
{ "ax", 4, offsetof(struct pt_regs, ax) },
{ "cx", 4, offsetof(struct pt_regs, cx) },
{ "dx", 4, offsetof(struct pt_regs, dx) },
{ "bx", 4, offsetof(struct pt_regs, bx) },
{ "sp", 4, offsetof(struct pt_regs, sp) },
{ "bp", 4, offsetof(struct pt_regs, bp) },
{ "si", 4, offsetof(struct pt_regs, si) },
{ "di", 4, offsetof(struct pt_regs, di) },
{ "ip", 4, offsetof(struct pt_regs, ip) },
{ "flags", 4, offsetof(struct pt_regs, flags) },
{ "cs", 4, offsetof(struct pt_regs, cs) },
{ "ss", 4, offsetof(struct pt_regs, ss) },
{ "ds", 4, offsetof(struct pt_regs, ds) },
{ "es", 4, offsetof(struct pt_regs, es) },
{ "fs", 4, -1 },
{ "gs", 4, -1 },
#else
{ "ax", 8, offsetof(struct pt_regs, ax) },
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, dx) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },
{ "r8", 8, offsetof(struct pt_regs, r8) },
{ "r9", 8, offsetof(struct pt_regs, r9) },
{ "r10", 8, offsetof(struct pt_regs, r10) },
{ "r11", 8, offsetof(struct pt_regs, r11) },
{ "r12", 8, offsetof(struct pt_regs, r12) },
{ "r13", 8, offsetof(struct pt_regs, r13) },
{ "r14", 8, offsetof(struct pt_regs, r14) },
{ "r15", 8, offsetof(struct pt_regs, r15) },
{ "ip", 8, offsetof(struct pt_regs, ip) },
{ "flags", 4, offsetof(struct pt_regs, flags) },
{ "cs", 4, offsetof(struct pt_regs, cs) },
{ "ss", 4, offsetof(struct pt_regs, ss) },
#endif
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (
#ifdef CONFIG_X86_32
regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
#endif
regno == GDB_SP || regno == GDB_ORIG_AX)
return 0;
if (dbg_reg_def[regno].offset != -1)
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno == GDB_ORIG_AX) {
memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
return "orig_ax";
}
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
#ifdef CONFIG_X86_32
switch (regno) {
case GDB_SS:
if (!user_mode_vm(regs))
*(unsigned long *)mem = __KERNEL_DS;
break;
case GDB_SP:
if (!user_mode_vm(regs))
*(unsigned long *)mem = kernel_stack_pointer(regs);
break;
case GDB_GS:
case GDB_FS:
*(unsigned long *)mem = 0xFFFF;
break;
}
#endif
return dbg_reg_def[regno].name;
}
/**
* sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
* @p: The &struct task_struct of the desired process.
*
* Convert the register values of the sleeping process in @p to
* the format that GDB expects.
* This function is called when kgdb does not have access to the
* &struct pt_regs and therefore it should fill the gdb registers
* @gdb_regs with what has been saved in &struct thread_struct
* thread field during switch_to.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
gdb_regs[GDB_AX] = 0;
gdb_regs[GDB_BX] = 0;
gdb_regs[GDB_CX] = 0;
gdb_regs[GDB_DX] = 0;
gdb_regs[GDB_SI] = 0;
gdb_regs[GDB_DI] = 0;
gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
gdb_regs[GDB_DS] = __KERNEL_DS;
gdb_regs[GDB_ES] = __KERNEL_DS;
gdb_regs[GDB_PS] = 0;
gdb_regs[GDB_CS] = __KERNEL_CS;
gdb_regs[GDB_PC] = p->thread.ip;
gdb_regs[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_FS] = 0xFFFF;
gdb_regs[GDB_GS] = 0xFFFF;
#else
gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
gdb_regs32[GDB_CS] = __KERNEL_CS;
gdb_regs32[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_PC] = 0;
gdb_regs[GDB_R8] = 0;
gdb_regs[GDB_R9] = 0;
gdb_regs[GDB_R10] = 0;
gdb_regs[GDB_R11] = 0;
gdb_regs[GDB_R12] = 0;
gdb_regs[GDB_R13] = 0;
gdb_regs[GDB_R14] = 0;
gdb_regs[GDB_R15] = 0;
#endif
gdb_regs[GDB_SP] = p->thread.sp;
}
static struct hw_breakpoint {
unsigned enabled;
unsigned long addr;
int len;
int type;
struct perf_event * __percpu *pev;
} breakinfo[HBP_NUM];
static unsigned long early_dr7;
static void kgdb_correct_hw_break(void)
{
int breakno;
for (breakno = 0; breakno < HBP_NUM; breakno++) {
struct perf_event *bp;
struct arch_hw_breakpoint *info;
int val;
int cpu = raw_smp_processor_id();
if (!breakinfo[breakno].enabled)
continue;
if (dbg_is_early) {
set_debugreg(breakinfo[breakno].addr, breakno);
early_dr7 |= encode_dr7(breakno,
breakinfo[breakno].len,
breakinfo[breakno].type);
set_debugreg(early_dr7, 7);
continue;
}
bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
info = counter_arch_bp(bp);
if (bp->attr.disabled != 1)
continue;
bp->attr.bp_addr = breakinfo[breakno].addr;
bp->attr.bp_len = breakinfo[breakno].len;
bp->attr.bp_type = breakinfo[breakno].type;
info->address = breakinfo[breakno].addr;
info->len = breakinfo[breakno].len;
info->type = breakinfo[breakno].type;
val = arch_install_hw_breakpoint(bp);
if (!val)
bp->attr.disabled = 0;
}
if (!dbg_is_early)
hw_breakpoint_restore();
}
static int hw_break_reserve_slot(int breakno)
{
int cpu;
int cnt = 0;
struct perf_event **pevent;
if (dbg_is_early)
return 0;
for_each_online_cpu(cpu) {
cnt++;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_reserve_bp_slot(*pevent))
goto fail;
}
return 0;
fail:
for_each_online_cpu(cpu) {
cnt--;
if (!cnt)
break;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
dbg_release_bp_slot(*pevent);
}
return -1;
}
static int hw_break_release_slot(int breakno)
{
struct perf_event **pevent;
int cpu;
if (dbg_is_early)
return 0;
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
/*
* The debugger is responsible for handling the retry on
* remove failure.
*/
return -1;
}
return 0;
}
static int
kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < HBP_NUM; i++)
if (breakinfo[i].addr == addr && breakinfo[i].enabled)
break;
if (i == HBP_NUM)
return -1;
if (hw_break_release_slot(i)) {
printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
return -1;
}
breakinfo[i].enabled = 0;
return 0;
}
static void kgdb_remove_all_hw_break(void)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
for (i = 0; i < HBP_NUM; i++) {
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (!bp->attr.disabled) {
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
continue;
}
if (dbg_is_early)
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
else if (hw_break_release_slot(i))
printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
breakinfo[i].addr);
breakinfo[i].enabled = 0;
}
}
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < HBP_NUM; i++)
if (!breakinfo[i].enabled)
break;
if (i == HBP_NUM)
return -1;
switch (bptype) {
case BP_HARDWARE_BREAKPOINT:
len = 1;
breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
break;
case BP_WRITE_WATCHPOINT:
breakinfo[i].type = X86_BREAKPOINT_WRITE;
break;
case BP_ACCESS_WATCHPOINT:
breakinfo[i].type = X86_BREAKPOINT_RW;
break;
default:
return -1;
}
switch (len) {
case 1:
breakinfo[i].len = X86_BREAKPOINT_LEN_1;
break;
case 2:
breakinfo[i].len = X86_BREAKPOINT_LEN_2;
break;
case 4:
breakinfo[i].len = X86_BREAKPOINT_LEN_4;
break;
#ifdef CONFIG_X86_64
case 8:
breakinfo[i].len = X86_BREAKPOINT_LEN_8;
break;
#endif
default:
return -1;
}
breakinfo[i].addr = addr;
if (hw_break_reserve_slot(i)) {
breakinfo[i].addr = 0;
return -1;
}
breakinfo[i].enabled = 1;
return 0;
}
/**
* kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
* @regs: Current &struct pt_regs.
*
* This function will be called if the particular architecture must
* disable hardware debugging while it is processing gdb packets or
* handling exception.
*/
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
/* Disable hardware debugging while we are in kgdb: */
set_debugreg(0UL, 7);
for (i = 0; i < HBP_NUM; i++) {
if (!breakinfo[i].enabled)
continue;
if (dbg_is_early) {
early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
breakinfo[i].type);
continue;
}
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
}
}
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in the case of MIPS, smp_call_function() is used to round up CPUs. In
* this case, we have to make sure that interrupts are enabled before
* calling smp_call_function(). The argument to this function is
* the flags that will be used when restoring the interrupts. There is a
* local_irq_save() call before kgdb_roundup_cpus().
*
* On non-SMP systems, this is not called.
*/
void kgdb_roundup_cpus(unsigned long flags)
{
apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*/
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
case 's':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->ip = addr;
case 'D':
case 'k':
/* clear the trace bit */
linux_regs->flags &= ~X86_EFLAGS_TF;
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
if (remcomInBuffer[0] == 's') {
linux_regs->flags |= X86_EFLAGS_TF;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
return 0;
}
/* this means that we do not want to exit from the handler: */
return -1;
}
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
/*
* Single step exception from kernel space to user space, so
* eat the exception and continue the process:
*/
printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
"resuming...\n");
kgdb_arch_handle_exception(args->trapnr, args->signr,
args->err, "c", "", regs);
/*
* Reset the BS bit in dr6 (pointed to by args->err) to
* denote completion of processing
*/
(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
return NOTIFY_STOP;
}
static int was_in_debug_nmi[NR_CPUS];
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
switch (cmd) {
case DIE_NMI:
if (atomic_read(&kgdb_active) != -1) {
/* KGDB CPU roundup */
kgdb_nmicallback(raw_smp_processor_id(), regs);
was_in_debug_nmi[raw_smp_processor_id()] = 1;
touch_nmi_watchdog();
return NOTIFY_STOP;
}
return NOTIFY_DONE;
case DIE_NMIUNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
return NOTIFY_STOP;
}
return NOTIFY_DONE;
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
return single_step_cont(regs, args);
break;
} else if (test_thread_flag(TIF_SINGLESTEP))
/* This means a user thread is single stepping
* a system call which should be ignored
*/
return NOTIFY_DONE;
/* fall through */
default:
if (user_mode(regs))
return NOTIFY_DONE;
}
if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
/* Must touch watchdog before return to normal operation */
touch_nmi_watchdog();
return NOTIFY_STOP;
}
int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
return __kgdb_notify(&args, cmd);
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = NMI_LOCAL_LOW_PRIOR,
};
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
static void kgdb_hw_overflow_handler(struct perf_event *event,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct task_struct *tsk = current;
int i;
for (i = 0; i < 4; i++)
if (breakinfo[i].enabled)
tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}
void kgdb_arch_late(void)
{
int i, cpu;
struct perf_event_attr attr;
struct perf_event **pevent;
/*
* Pre-allocate the hw breakpoint structures in the non-atomic
* portion of kgdb because this operation requires mutexes to
* complete.
*/
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
for (i = 0; i < HBP_NUM; i++) {
if (breakinfo[i].pev)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
printk(KERN_ERR "kgdb: Could not allocate hw"
"breakpoints\nDisabling the kernel debugger\n");
breakinfo[i].pev = NULL;
kgdb_arch_exit();
return;
}
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
pevent[0]->hw.sample_period = 1;
pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
if (pevent[0]->destroy != NULL) {
pevent[0]->destroy = NULL;
release_bp_slot(*pevent);
}
}
}
}
/**
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
int i;
for (i = 0; i < 4; i++) {
if (breakinfo[i].pev) {
unregister_wide_hw_breakpoint(breakinfo[i].pev);
breakinfo[i].pev = NULL;
}
}
unregister_die_notifier(&kgdb_notifier);
}
/**
*
* kgdb_skipexception - Bail out of KGDB when we've been triggered.
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
*
* On some architectures we need to skip a breakpoint exception when
* it occurs after a breakpoint has been removed.
*
* Skip an int3 exception when it occurs after a breakpoint has been
* removed. Backtrack eip by 1 since the int3 would have caused it to
* increment by 1.
*/
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
regs->ip -= 1;
return 1;
}
return 0;
}
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
if (exception == 3)
return instruction_pointer(regs) - 1;
return instruction_pointer(regs);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
.flags = KGDB_HW_BREAKPOINT,
.set_hw_breakpoint = kgdb_set_hw_break,
.remove_hw_breakpoint = kgdb_remove_hw_break,
.disable_hw_break = kgdb_disable_hw_debug,
.remove_all_hw_break = kgdb_remove_all_hw_break,
.correct_hw_break = kgdb_correct_hw_break,
};
|
108,998,368,648,004 | /* By Ross Biro 1/23/92 */
/*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include "tls.h"
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum x86_regset {
REGSET_GENERAL,
REGSET_FP,
REGSET_XFP,
REGSET_IOPERM64 = REGSET_XFP,
REGSET_XSTATE,
REGSET_TLS,
REGSET_IOPERM32,
};
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
REG_OFFSET_NAME(r15),
REG_OFFSET_NAME(r14),
REG_OFFSET_NAME(r13),
REG_OFFSET_NAME(r12),
REG_OFFSET_NAME(r11),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r8),
#endif
REG_OFFSET_NAME(bx),
REG_OFFSET_NAME(cx),
REG_OFFSET_NAME(dx),
REG_OFFSET_NAME(si),
REG_OFFSET_NAME(di),
REG_OFFSET_NAME(bp),
REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
REG_OFFSET_NAME(ds),
REG_OFFSET_NAME(es),
REG_OFFSET_NAME(fs),
REG_OFFSET_NAME(gs),
#endif
REG_OFFSET_NAME(orig_ax),
REG_OFFSET_NAME(ip),
REG_OFFSET_NAME(cs),
REG_OFFSET_NAME(flags),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(ss),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
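/*
 * Illustrative usage (a hypothetical caller, not part of this file):
 *
 *	int off = regs_query_register_offset("ip");
 *	if (off >= 0)
 *		ip = *(unsigned long *)((char *)regs + off);
 */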
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
[0] = offsetof(struct pt_regs, ax),
[1] = offsetof(struct pt_regs, dx),
[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
[0] = offsetof(struct pt_regs, di),
[1] = offsetof(struct pt_regs, si),
[2] = offsetof(struct pt_regs, dx),
[3] = offsetof(struct pt_regs, cx),
[4] = offsetof(struct pt_regs, r8),
[5] = offsetof(struct pt_regs, r9)
#endif
};
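/*
 * Reading the table above (an illustrative note, not from the
 * original file): it encodes which registers carry the first
 * function arguments, e.g. on CONFIG_X86_64 argument 0 arrives
 * in %rdi, so arg_offs_table[0] is offsetof(struct pt_regs, di);
 * on CONFIG_X86_32 it matches the regparm(3) convention
 * (%eax, %edx, %ecx).
 */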
/*
* does not yet catch signals sent when the child dies,
* in exit.c or in signal.c.
*/
/*
* Determines which flags the user has access to [1 = access, 0 = no access].
*/
#define FLAG_MASK_32 ((unsigned long) \
(X86_EFLAGS_CF | X86_EFLAGS_PF | \
X86_EFLAGS_AF | X86_EFLAGS_ZF | \
X86_EFLAGS_SF | X86_EFLAGS_TF | \
X86_EFLAGS_DF | X86_EFLAGS_OF | \
X86_EFLAGS_RF | X86_EFLAGS_AC))
/*
* Determines whether a value may be installed in a segment register.
*/
static inline bool invalid_selector(u16 value)
{
return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
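/*
 * Worked example (illustrative, not from the original file): a
 * typical 64-bit user %cs of 0x33 has (0x33 & SEGMENT_RPL_MASK)
 * == 3 == USER_RPL and is accepted; a kernel selector such as
 * 0x10 has RPL 0 and is rejected; the null selector 0 is
 * explicitly allowed.
 */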
#ifdef CONFIG_X86_32
#define FLAG_MASK FLAG_MASK_32
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
return &regs->bx + (regno >> 2);
}
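/*
 * Illustrative arithmetic (not from the original file): on 32-bit,
 * regno is a byte offset into struct user_regs_struct, so
 * regno >> 2 converts it into an index of 4-byte slots starting
 * at pt_regs.bx (offset 0 per the BUILD_BUG_ON above); e.g.
 * regno == 8 yields &regs->bx + 2, i.e. regs->dx.
 */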
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
/*
* Returning the value truncates it to 16 bits.
*/
unsigned int retval;
if (offset != offsetof(struct user_regs_struct, gs))
retval = *pt_regs_access(task_pt_regs(task), offset);
else {
if (task == current)
retval = get_user_gs(task_pt_regs(task));
else
retval = task_user_gs(task);
}
return retval;
}
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
/*
* The value argument was already truncated to 16 bits.
*/
if (invalid_selector(value))
return -EIO;
/*
* For %cs and %ss we cannot permit a null selector.
* We can permit a bogus selector as long as it has USER_RPL.
* Null selectors are fine for other segment registers, but
* we will never get back to user mode with invalid %cs or %ss
* and will take the trap in iret instead. Much code relies
* on user_mode() to distinguish a user trap frame (which can
* safely use invalid selectors) from a kernel trap frame.
*/
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
break;
case offsetof(struct user_regs_struct, gs):
if (task == current)
set_user_gs(task_pt_regs(task), value);
else
task_user_gs(task) = value;
}
return 0;
}
#else /* CONFIG_X86_64 */
#define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
return &regs->r15 + (offset / sizeof(regs->r15));
}
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
/*
* Returning the value truncates it to 16 bits.
*/
unsigned int seg;
switch (offset) {
case offsetof(struct user_regs_struct, fs):
if (task == current) {
/* Older gas can't assemble movq %?s,%r?? */
asm("movl %%fs,%0" : "=r" (seg));
return seg;
}
return task->thread.fsindex;
case offsetof(struct user_regs_struct, gs):
if (task == current) {
asm("movl %%gs,%0" : "=r" (seg));
return seg;
}
return task->thread.gsindex;
case offsetof(struct user_regs_struct, ds):
if (task == current) {
asm("movl %%ds,%0" : "=r" (seg));
return seg;
}
return task->thread.ds;
case offsetof(struct user_regs_struct, es):
if (task == current) {
asm("movl %%es,%0" : "=r" (seg));
return seg;
}
return task->thread.es;
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ss):
break;
}
return *pt_regs_access(task_pt_regs(task), offset);
}
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
/*
* The value argument was already truncated to 16 bits.
*/
if (invalid_selector(value))
return -EIO;
switch (offset) {
case offsetof(struct user_regs_struct,fs):
/*
* If this is setting fs as for normal 64-bit use but
* setting fs_base has implicitly changed it, leave it.
*/
if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
task->thread.fs != 0) ||
(value == 0 && task->thread.fsindex == FS_TLS_SEL &&
task->thread.fs == 0))
break;
task->thread.fsindex = value;
if (task == current)
loadsegment(fs, task->thread.fsindex);
break;
case offsetof(struct user_regs_struct,gs):
/*
* If this is setting gs as for normal 64-bit use but
* setting gs_base has implicitly changed it, leave it.
*/
if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
task->thread.gs != 0) ||
(value == 0 && task->thread.gsindex == GS_TLS_SEL &&
task->thread.gs == 0))
break;
task->thread.gsindex = value;
if (task == current)
load_gs_index(task->thread.gsindex);
break;
case offsetof(struct user_regs_struct,ds):
task->thread.ds = value;
if (task == current)
loadsegment(ds, task->thread.ds);
break;
case offsetof(struct user_regs_struct,es):
task->thread.es = value;
if (task == current)
loadsegment(es, task->thread.es);
break;
/*
* Can't actually change these in 64-bit mode.
*/
case offsetof(struct user_regs_struct,cs):
if (unlikely(value == 0))
return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->cs = value;
#endif
break;
case offsetof(struct user_regs_struct,ss):
if (unlikely(value == 0))
return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->ss = value;
#endif
break;
}
return 0;
}
#endif /* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
{
unsigned long retval = task_pt_regs(task)->flags;
/*
* If the debugger set TF, hide it from the readout.
*/
if (test_tsk_thread_flag(task, TIF_FORCED_TF))
retval &= ~X86_EFLAGS_TF;
return retval;
}
static int set_flags(struct task_struct *task, unsigned long value)
{
struct pt_regs *regs = task_pt_regs(task);
/*
* If the user value contains TF, mark that
* it was not "us" (the debugger) that set it.
* If not, make sure it stays set if we set it earlier.
*/
if (value & X86_EFLAGS_TF)
clear_tsk_thread_flag(task, TIF_FORCED_TF);
else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
value |= X86_EFLAGS_TF;
regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
return 0;
}
static int putreg(struct task_struct *child,
unsigned long offset, unsigned long value)
{
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ds):
case offsetof(struct user_regs_struct, es):
case offsetof(struct user_regs_struct, fs):
case offsetof(struct user_regs_struct, gs):
case offsetof(struct user_regs_struct, ss):
return set_segment_reg(child, offset, value);
case offsetof(struct user_regs_struct, flags):
return set_flags(child, value);
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct,fs_base):
if (value >= TASK_SIZE_OF(child))
return -EIO;
/*
* When changing the segment base, use do_arch_prctl
* to set either thread.fs or thread.fsindex and the
* corresponding GDT slot.
*/
if (child->thread.fs != value)
return do_arch_prctl(child, ARCH_SET_FS, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
/*
* Exactly the same here as the %fs handling above.
*/
if (value >= TASK_SIZE_OF(child))
return -EIO;
if (child->thread.gs != value)
return do_arch_prctl(child, ARCH_SET_GS, value);
return 0;
#endif
}
*pt_regs_access(task_pt_regs(child), offset) = value;
return 0;
}
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ds):
case offsetof(struct user_regs_struct, es):
case offsetof(struct user_regs_struct, fs):
case offsetof(struct user_regs_struct, gs):
case offsetof(struct user_regs_struct, ss):
return get_segment_reg(task, offset);
case offsetof(struct user_regs_struct, flags):
return get_flags(task);
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct, fs_base): {
/*
* do_arch_prctl may have used a GDT slot instead of
* the MSR. To userland, it appears the same either
* way, except the %fs segment selector might not be 0.
*/
unsigned int seg = task->thread.fsindex;
if (task->thread.fs != 0)
return task->thread.fs;
if (task == current)
asm("movl %%fs,%0" : "=r" (seg));
if (seg != FS_TLS_SEL)
return 0;
return get_desc_base(&task->thread.tls_array[FS_TLS]);
}
case offsetof(struct user_regs_struct, gs_base): {
/*
* Exactly the same here as the %fs handling above.
*/
unsigned int seg = task->thread.gsindex;
if (task->thread.gs != 0)
return task->thread.gs;
if (task == current)
asm("movl %%gs,%0" : "=r" (seg));
if (seg != GS_TLS_SEL)
return 0;
return get_desc_base(&task->thread.tls_array[GS_TLS]);
}
#endif
}
return *pt_regs_access(task_pt_regs(task), offset);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (kbuf) {
unsigned long *k = kbuf;
while (count >= sizeof(*k)) {
*k++ = getreg(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
unsigned long __user *u = ubuf;
while (count >= sizeof(*u)) {
if (__put_user(getreg(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
if (kbuf) {
const unsigned long *k = kbuf;
while (count >= sizeof(*k) && !ret) {
ret = putreg(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
while (count >= sizeof(*u) && !ret) {
unsigned long word;
ret = __get_user(word, u++);
if (ret)
break;
ret = putreg(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return ret;
}
static void ptrace_triggered(struct perf_event *bp, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
int i;
struct thread_struct *thread = &(current->thread);
/*
* Store in the virtual DR6 register the fact that the breakpoint
* was hit so the thread's debugger will see it.
*/
for (i = 0; i < HBP_NUM; i++) {
if (thread->ptrace_bps[i] == bp)
break;
}
thread->debugreg6 |= (DR_TRAP0 << i);
}
/*
* Walk through every ptrace breakpoint for this thread and
* build the dr7 value on top of their attributes.
*/
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
int i;
int dr7 = 0;
struct arch_hw_breakpoint *info;
for (i = 0; i < HBP_NUM; i++) {
if (bp[i] && !bp[i]->attr.disabled) {
info = counter_arch_bp(bp[i]);
dr7 |= encode_dr7(i, info->len, info->type);
}
}
return dr7;
}
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
struct task_struct *tsk, int disabled)
{
int err;
int gen_len, gen_type;
struct perf_event_attr attr;
/*
* We should have at least an inactive breakpoint at this
* slot. It means the user is writing dr7 without having
* written the address register first
*/
if (!bp)
return -EINVAL;
err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
if (err)
return err;
attr = bp->attr;
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = disabled;
return modify_user_hw_breakpoint(bp, &attr);
}
/*
* Handle ptrace writes to debug register 7.
*/
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
struct thread_struct *thread = &(tsk->thread);
unsigned long old_dr7;
int i, orig_ret = 0, rc = 0;
int enabled, second_pass = 0;
unsigned len, type;
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
data &= ~DR_CONTROL_RESERVED;
old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
/*
* Loop through all the hardware breakpoints, making the
* appropriate changes to each.
*/
for (i = 0; i < HBP_NUM; i++) {
enabled = decode_dr7(data, i, &len, &type);
bp = thread->ptrace_bps[i];
if (!enabled) {
if (bp) {
/*
* Don't unregister the breakpoints right away,
* unless all register_user_hw_breakpoint()
* requests have succeeded. This prevents
* any window of opportunity for debug
* register grabbing by other users.
*/
if (!second_pass)
continue;
rc = ptrace_modify_breakpoint(bp, len, type,
tsk, 1);
if (rc)
break;
}
continue;
}
rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
if (rc)
break;
}
/*
* Make a second pass to free the remaining unused breakpoints
* or to restore the original breakpoints if an error occurred.
*/
if (!second_pass) {
second_pass = 1;
if (rc < 0) {
orig_ret = rc;
data = old_dr7;
}
goto restore;
}
ptrace_put_breakpoints(tsk);
return ((orig_ret < 0) ? orig_ret : rc);
}
/*
* Handle PTRACE_PEEKUSR calls for the debug register area.
*/
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
struct thread_struct *thread = &(tsk->thread);
unsigned long val = 0;
if (n < HBP_NUM) {
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
bp = thread->ptrace_bps[n];
if (!bp)
val = 0;
else
val = bp->hw.info.address;
ptrace_put_breakpoints(tsk);
} else if (n == 6) {
val = thread->debugreg6;
} else if (n == 7) {
val = thread->ptrace_dr7;
}
return val;
}
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
unsigned long addr)
{
struct perf_event *bp;
struct thread_struct *t = &tsk->thread;
struct perf_event_attr attr;
int err = 0;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
if (!t->ptrace_bps[nr]) {
ptrace_breakpoint_init(&attr);
/*
* Put stub len and type to register (reserve) an inactive but
* correct bp
*/
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
/*
* CHECKME: the previous code returned -EIO if the addr wasn't
* a valid task virtual addr. The new one will return -EINVAL in
* this case.
* -EINVAL may be what we want for in-kernel breakpoint users,
* but -EIO looks better for ptrace, since we refuse a register
* write for the user. And anyway this is the previous
* behaviour.
*/
if (IS_ERR(bp)) {
err = PTR_ERR(bp);
goto put;
}
t->ptrace_bps[nr] = bp;
} else {
bp = t->ptrace_bps[nr];
attr = bp->attr;
attr.bp_addr = addr;
err = modify_user_hw_breakpoint(bp, &attr);
}
put:
ptrace_put_breakpoints(tsk);
return err;
}
/*
* Handle PTRACE_POKEUSR calls for the debug register area.
*/
int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
{
struct thread_struct *thread = &(tsk->thread);
int rc = 0;
/* There are no DR4 or DR5 registers */
if (n == 4 || n == 5)
return -EIO;
if (n == 6) {
thread->debugreg6 = val;
goto ret_path;
}
if (n < HBP_NUM) {
rc = ptrace_set_breakpoint_addr(tsk, n, val);
if (rc)
return rc;
}
/* All that's left is DR7 */
if (n == 7) {
rc = ptrace_write_dr7(tsk, val);
if (!rc)
thread->ptrace_dr7 = val;
}
ret_path:
return rc;
}
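/*
* Illustrative tracer-side view (not part of this file): a debugger
* reaches ptrace_set_debugreg() through PTRACE_POKEUSER on the
* u_debugreg area, e.g.
*
*   ptrace(PTRACE_POKEUSER, pid,
*          offsetof(struct user, u_debugreg[0]), watch_addr);
*   ptrace(PTRACE_POKEUSER, pid,
*          offsetof(struct user, u_debugreg[7]), dr7_value);
*/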
/*
* These access the current or another (stopped) task's io permission
* bitmap for debugging or core dump.
*/
static int ioperm_active(struct task_struct *target,
const struct user_regset *regset)
{
return target->thread.io_bitmap_max / regset->size;
}
static int ioperm_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (!target->thread.io_bitmap_ptr)
return -ENXIO;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.io_bitmap_ptr,
0, IO_BITMAP_BYTES);
}
/*
* Called by kernel/ptrace.c when detaching.
*
* Make sure the single step bit is not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
tmp = 0; /* Default return condition */
if (addr < sizeof(struct user_regs_struct))
tmp = getreg(child, addr);
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
addr <= offsetof(struct user, u_debugreg[7])) {
addr -= offsetof(struct user, u_debugreg[0]);
tmp = ptrace_get_debugreg(child, addr / sizeof(data));
}
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
if (addr < sizeof(struct user_regs_struct))
ret = putreg(child, addr, data);
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
addr <= offsetof(struct user, u_debugreg[7])) {
addr -= offsetof(struct user, u_debugreg[0]);
ret = ptrace_set_debugreg(child,
addr / sizeof(data), data);
}
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
#ifdef CONFIG_X86_32
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0;
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0;
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
case PTRACE_GET_THREAD_AREA:
if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
(struct user_desc __user *)data);
break;
case PTRACE_SET_THREAD_AREA:
if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
(struct user_desc __user *)data, 0);
break;
#endif
#ifdef CONFIG_X86_64
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
are reversed. */
case PTRACE_ARCH_PRCTL:
ret = do_arch_prctl(child, data, addr);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>
#define R32(l,q) \
case offsetof(struct user32, regs.l): \
regs->q = value; break
#define SEG32(rs) \
case offsetof(struct user32, regs.rs): \
return set_segment_reg(child, \
offsetof(struct user_regs_struct, rs), \
value); \
break
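/*
* R32(l, q) maps the 32-bit user32 register named l onto the 64-bit
* pt_regs member q; SEG32(rs) routes the segment register rs through
* set_segment_reg() so selector validation is shared with the native path.
*/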
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
struct pt_regs *regs = task_pt_regs(child);
switch (regno) {
SEG32(cs);
SEG32(ds);
SEG32(es);
SEG32(fs);
SEG32(gs);
SEG32(ss);
R32(ebx, bx);
R32(ecx, cx);
R32(edx, dx);
R32(edi, di);
R32(esi, si);
R32(ebp, bp);
R32(eax, ax);
R32(eip, ip);
R32(esp, sp);
case offsetof(struct user32, regs.orig_eax):
/*
* A 32-bit debugger setting orig_eax means to restore
* the state of the task restarting a 32-bit syscall.
* Make sure we interpret the -ERESTART* codes correctly
* in case the task is not actually still sitting at the
* exit from a 32-bit syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
if (syscall_get_nr(child, regs) >= 0)
task_thread_info(child)->status |= TS_COMPAT;
break;
case offsetof(struct user32, regs.eflags):
return set_flags(child, value);
case offsetof(struct user32, u_debugreg[0]) ...
offsetof(struct user32, u_debugreg[7]):
regno -= offsetof(struct user32, u_debugreg[0]);
return ptrace_set_debugreg(child, regno / 4, value);
default:
if (regno > sizeof(struct user32) || (regno & 3))
return -EIO;
/*
* Other dummy fields in the virtual user structure
* are ignored
*/
break;
}
return 0;
}
#undef R32
#undef SEG32
#define R32(l,q) \
case offsetof(struct user32, regs.l): \
*val = regs->q; break
#define SEG32(rs) \
case offsetof(struct user32, regs.rs): \
*val = get_segment_reg(child, \
offsetof(struct user_regs_struct, rs)); \
break
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
struct pt_regs *regs = task_pt_regs(child);
switch (regno) {
SEG32(ds);
SEG32(es);
SEG32(fs);
SEG32(gs);
R32(cs, cs);
R32(ss, ss);
R32(ebx, bx);
R32(ecx, cx);
R32(edx, dx);
R32(edi, di);
R32(esi, si);
R32(ebp, bp);
R32(eax, ax);
R32(orig_eax, orig_ax);
R32(eip, ip);
R32(esp, sp);
case offsetof(struct user32, regs.eflags):
*val = get_flags(child);
break;
case offsetof(struct user32, u_debugreg[0]) ...
offsetof(struct user32, u_debugreg[7]):
regno -= offsetof(struct user32, u_debugreg[0]);
*val = ptrace_get_debugreg(child, regno / 4);
break;
default:
if (regno > sizeof(struct user32) || (regno & 3))
return -EIO;
/*
* Other dummy fields in the virtual user structure
* are ignored
*/
*val = 0;
break;
}
return 0;
}
#undef R32
#undef SEG32
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (kbuf) {
compat_ulong_t *k = kbuf;
while (count >= sizeof(*k)) {
getreg32(target, pos, k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
compat_ulong_t __user *u = ubuf;
while (count >= sizeof(*u)) {
compat_ulong_t word;
getreg32(target, pos, &word);
if (__put_user(word, u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int genregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
if (kbuf) {
const compat_ulong_t *k = kbuf;
while (count >= sizeof(*k) && !ret) {
ret = putreg32(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
while (count >= sizeof(*u) && !ret) {
compat_ulong_t word;
ret = __get_user(word, u++);
if (ret)
break;
ret = putreg32(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return ret;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
void __user *datap = compat_ptr(data);
int ret;
__u32 val;
switch (request) {
case PTRACE_PEEKUSR:
ret = getreg32(child, addr, &val);
if (ret == 0)
ret = put_user(val, (__u32 __user *)datap);
break;
case PTRACE_POKEUSR:
ret = putreg32(child, addr, data);
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct32),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_GENERAL, 0,
sizeof(struct user_regs_struct32),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_FP, 0,
sizeof(struct user_i387_ia32_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(
child, &user_x86_32_view, REGSET_FP,
0, sizeof(struct user_i387_ia32_struct), datap);
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_XFP, 0,
sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_XFP, 0,
sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_GET_THREAD_AREA:
case PTRACE_SET_THREAD_AREA:
return arch_ptrace(child, request, addr, data);
default:
return compat_ptrace_request(child, request, addr, data);
}
return ret;
}
#endif /* CONFIG_IA32_EMULATION */
#ifdef CONFIG_X86_64
static struct user_regset x86_64_regsets[] __read_mostly = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.get = genregs_get, .set = genregs_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_i387_struct) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
},
[REGSET_XSTATE] = {
.core_note_type = NT_X86_XSTATE,
.size = sizeof(u64), .align = sizeof(u64),
.active = xstateregs_active, .get = xstateregs_get,
.set = xstateregs_set
},
[REGSET_IOPERM64] = {
.core_note_type = NT_386_IOPERM,
.n = IO_BITMAP_LONGS,
.size = sizeof(long), .align = sizeof(long),
.active = ioperm_active, .get = ioperm_get
},
};
static const struct user_regset_view user_x86_64_view = {
.name = "x86_64", .e_machine = EM_X86_64,
.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
#else /* CONFIG_X86_32 */
#define user_regs_struct32 user_regs_struct
#define genregs32_get genregs_get
#define genregs32_set genregs_set
#define user_i387_ia32_struct user_i387_struct
#define user32_fxsr_struct user_fxsr_struct
#endif /* CONFIG_X86_64 */
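/*
* On native 32-bit kernels the *32 names above alias the native
* structures and handlers, so the x86_32 regset table below serves
* both CONFIG_X86_32 and CONFIG_IA32_EMULATION builds.
*/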
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.get = genregs32_get, .set = genregs32_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
},
[REGSET_XSTATE] = {
.core_note_type = NT_X86_XSTATE,
.size = sizeof(u64), .align = sizeof(u64),
.active = xstateregs_active, .get = xstateregs_get,
.set = xstateregs_set
},
[REGSET_TLS] = {
.core_note_type = NT_386_TLS,
.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
.size = sizeof(struct user_desc),
.align = sizeof(struct user_desc),
.active = regset_tls_active,
.get = regset_tls_get, .set = regset_tls_set
},
[REGSET_IOPERM32] = {
.core_note_type = NT_386_IOPERM,
.n = IO_BITMAP_BYTES / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = ioperm_active, .get = ioperm_get
},
};
static const struct user_regset_view user_x86_32_view = {
.name = "i386", .e_machine = EM_386,
.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
/*
* This represents bytes 464..511 in the memory layout exported through
* the REGSET_XSTATE interface.
*/
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
return &user_x86_64_view;
#endif
}
static void fill_sigtrap_info(struct task_struct *tsk,
struct pt_regs *regs,
int error_code, int si_code,
struct siginfo *info)
{
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs,
struct siginfo *info)
{
fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code)
{
struct siginfo info;
fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
#ifdef CONFIG_X86_32
# define IS_IA32 1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32 is_compat_task()
#else
# define IS_IA32 0
#endif
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state. If we entered on the slow path, TF was already set.
*/
if (test_thread_flag(TIF_SINGLESTEP))
regs->flags |= X86_EFLAGS_TF;
/* do the secure computing check first */
secure_computing(regs->orig_ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
ret = -1L;
if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
tracehook_report_syscall_entry(regs))
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->orig_ax);
if (unlikely(current->audit_context)) {
if (IS_IA32)
audit_syscall_entry(AUDIT_ARCH_I386,
regs->orig_ax,
regs->bx, regs->cx,
regs->dx, regs->si);
#ifdef CONFIG_X86_64
else
audit_syscall_entry(AUDIT_ARCH_X86_64,
regs->orig_ax,
regs->di, regs->si,
regs->dx, regs->r10);
#endif
}
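/* GNU "a ?: b": -1L if tracing vetoed the syscall, else the original number. */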
return ret ?: regs->orig_ax;
}
void syscall_trace_leave(struct pt_regs *regs)
{
bool step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
* syscall_trace_enter().
*/
step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
!test_thread_flag(TIF_SYSCALL_EMU);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
| /* By Ross Biro 1/23/92 */
/*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include "tls.h"
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum x86_regset {
REGSET_GENERAL,
REGSET_FP,
REGSET_XFP,
REGSET_IOPERM64 = REGSET_XFP,
REGSET_XSTATE,
REGSET_TLS,
REGSET_IOPERM32,
};
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
REG_OFFSET_NAME(r15),
REG_OFFSET_NAME(r14),
REG_OFFSET_NAME(r13),
REG_OFFSET_NAME(r12),
REG_OFFSET_NAME(r11),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r8),
#endif
REG_OFFSET_NAME(bx),
REG_OFFSET_NAME(cx),
REG_OFFSET_NAME(dx),
REG_OFFSET_NAME(si),
REG_OFFSET_NAME(di),
REG_OFFSET_NAME(bp),
REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
REG_OFFSET_NAME(ds),
REG_OFFSET_NAME(es),
REG_OFFSET_NAME(fs),
REG_OFFSET_NAME(gs),
#endif
REG_OFFSET_NAME(orig_ax),
REG_OFFSET_NAME(ip),
REG_OFFSET_NAME(cs),
REG_OFFSET_NAME(flags),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(ss),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
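/*
* Example: regs_query_register_offset("ax") yields
* offsetof(struct pt_regs, ax); an unknown name returns -EINVAL.
*/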
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
[0] = offsetof(struct pt_regs, ax),
[1] = offsetof(struct pt_regs, dx),
[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
[0] = offsetof(struct pt_regs, di),
[1] = offsetof(struct pt_regs, si),
[2] = offsetof(struct pt_regs, dx),
[3] = offsetof(struct pt_regs, cx),
[4] = offsetof(struct pt_regs, r8),
[5] = offsetof(struct pt_regs, r9)
#endif
};
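/*
* arg_offs_table lists the pt_regs slots that hold the first function
* arguments under the kernel's calling convention: regparm(3) (%ax, %dx,
* %cx) on 32-bit, and the System V AMD64 order (%di, %si, %dx, %cx, %r8,
* %r9) on 64-bit.
*/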
/*
* This does not yet catch signals sent when the child dies;
* that should be handled in exit.c or in signal.c.
*/
/*
* Determines which flags the user has access to [1 = access, 0 = no access].
*/
#define FLAG_MASK_32 ((unsigned long) \
(X86_EFLAGS_CF | X86_EFLAGS_PF | \
X86_EFLAGS_AF | X86_EFLAGS_ZF | \
X86_EFLAGS_SF | X86_EFLAGS_TF | \
X86_EFLAGS_DF | X86_EFLAGS_OF | \
X86_EFLAGS_RF | X86_EFLAGS_AC))
/*
* Determines whether a value may be installed in a segment register.
*/
static inline bool invalid_selector(u16 value)
{
return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
#ifdef CONFIG_X86_32
#define FLAG_MASK FLAG_MASK_32
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
return &regs->bx + (regno >> 2);
}
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
/*
* Returning the value truncates it to 16 bits.
*/
unsigned int retval;
if (offset != offsetof(struct user_regs_struct, gs))
retval = *pt_regs_access(task_pt_regs(task), offset);
else {
if (task == current)
retval = get_user_gs(task_pt_regs(task));
else
retval = task_user_gs(task);
}
return retval;
}
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
/*
* The value argument was already truncated to 16 bits.
*/
if (invalid_selector(value))
return -EIO;
/*
* For %cs and %ss we cannot permit a null selector.
* We can permit a bogus selector as long as it has USER_RPL.
* Null selectors are fine for other segment registers, but
* we will never get back to user mode with invalid %cs or %ss
* and will take the trap in iret instead. Much code relies
* on user_mode() to distinguish a user trap frame (which can
* safely use invalid selectors) from a kernel trap frame.
*/
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ss):
if (unlikely(value == 0))
return -EIO;
default:
*pt_regs_access(task_pt_regs(task), offset) = value;
break;
case offsetof(struct user_regs_struct, gs):
if (task == current)
set_user_gs(task_pt_regs(task), value);
else
task_user_gs(task) = value;
}
return 0;
}
#else /* CONFIG_X86_64 */
#define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
return &regs->r15 + (offset / sizeof(regs->r15));
}
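/*
* Note on the indexing above: offset is a byte offset into struct
* user_regs_struct, whose leading members match struct pt_regs starting
* at r15, so offset / sizeof(regs->r15) picks the matching 8-byte slot.
* Offsets beyond the pt_regs layout (fs_base, gs_base, ds, es, fs, gs)
* are handled separately in getreg()/putreg() and never reach this helper.
*/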
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
/*
* Returning the value truncates it to 16 bits.
*/
unsigned int seg;
switch (offset) {
case offsetof(struct user_regs_struct, fs):
if (task == current) {
/* Older gas can't assemble movq %?s,%r?? */
asm("movl %%fs,%0" : "=r" (seg));
return seg;
}
return task->thread.fsindex;
case offsetof(struct user_regs_struct, gs):
if (task == current) {
asm("movl %%gs,%0" : "=r" (seg));
return seg;
}
return task->thread.gsindex;
case offsetof(struct user_regs_struct, ds):
if (task == current) {
asm("movl %%ds,%0" : "=r" (seg));
return seg;
}
return task->thread.ds;
case offsetof(struct user_regs_struct, es):
if (task == current) {
asm("movl %%es,%0" : "=r" (seg));
return seg;
}
return task->thread.es;
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ss):
break;
}
return *pt_regs_access(task_pt_regs(task), offset);
}
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
/*
* The value argument was already truncated to 16 bits.
*/
if (invalid_selector(value))
return -EIO;
switch (offset) {
case offsetof(struct user_regs_struct,fs):
/*
* If this is setting fs as for normal 64-bit use but
* setting fs_base has implicitly changed it, leave it.
*/
if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
task->thread.fs != 0) ||
(value == 0 && task->thread.fsindex == FS_TLS_SEL &&
task->thread.fs == 0))
break;
task->thread.fsindex = value;
if (task == current)
loadsegment(fs, task->thread.fsindex);
break;
case offsetof(struct user_regs_struct,gs):
/*
* If this is setting gs as for normal 64-bit use but
* setting gs_base has implicitly changed it, leave it.
*/
if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
task->thread.gs != 0) ||
(value == 0 && task->thread.gsindex == GS_TLS_SEL &&
task->thread.gs == 0))
break;
task->thread.gsindex = value;
if (task == current)
load_gs_index(task->thread.gsindex);
break;
case offsetof(struct user_regs_struct,ds):
task->thread.ds = value;
if (task == current)
loadsegment(ds, task->thread.ds);
break;
case offsetof(struct user_regs_struct,es):
task->thread.es = value;
if (task == current)
loadsegment(es, task->thread.es);
break;
/*
* Can't actually change these in 64-bit mode.
*/
case offsetof(struct user_regs_struct,cs):
if (unlikely(value == 0))
return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->cs = value;
#endif
break;
case offsetof(struct user_regs_struct,ss):
if (unlikely(value == 0))
return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->ss = value;
#endif
break;
}
return 0;
}
#endif /* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
{
unsigned long retval = task_pt_regs(task)->flags;
/*
* If the debugger set TF, hide it from the readout.
*/
if (test_tsk_thread_flag(task, TIF_FORCED_TF))
retval &= ~X86_EFLAGS_TF;
return retval;
}
static int set_flags(struct task_struct *task, unsigned long value)
{
struct pt_regs *regs = task_pt_regs(task);
/*
* If the user value contains TF, mark that
* it was not "us" (the debugger) that set it.
* If not, make sure it stays set if we set it earlier.
*/
if (value & X86_EFLAGS_TF)
clear_tsk_thread_flag(task, TIF_FORCED_TF);
else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
value |= X86_EFLAGS_TF;
regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
return 0;
}
static int putreg(struct task_struct *child,
unsigned long offset, unsigned long value)
{
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ds):
case offsetof(struct user_regs_struct, es):
case offsetof(struct user_regs_struct, fs):
case offsetof(struct user_regs_struct, gs):
case offsetof(struct user_regs_struct, ss):
return set_segment_reg(child, offset, value);
case offsetof(struct user_regs_struct, flags):
return set_flags(child, value);
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct,fs_base):
if (value >= TASK_SIZE_OF(child))
return -EIO;
/*
* When changing the segment base, use do_arch_prctl
* to set either thread.fs or thread.fsindex and the
* corresponding GDT slot.
*/
if (child->thread.fs != value)
return do_arch_prctl(child, ARCH_SET_FS, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
/*
* Exactly the same here as the %fs handling above.
*/
if (value >= TASK_SIZE_OF(child))
return -EIO;
if (child->thread.gs != value)
return do_arch_prctl(child, ARCH_SET_GS, value);
return 0;
#endif
}
*pt_regs_access(task_pt_regs(child), offset) = value;
return 0;
}
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
switch (offset) {
case offsetof(struct user_regs_struct, cs):
case offsetof(struct user_regs_struct, ds):
case offsetof(struct user_regs_struct, es):
case offsetof(struct user_regs_struct, fs):
case offsetof(struct user_regs_struct, gs):
case offsetof(struct user_regs_struct, ss):
return get_segment_reg(task, offset);
case offsetof(struct user_regs_struct, flags):
return get_flags(task);
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct, fs_base): {
/*
* do_arch_prctl may have used a GDT slot instead of
* the MSR. To userland, it appears the same either
* way, except the %fs segment selector might not be 0.
*/
unsigned int seg = task->thread.fsindex;
if (task->thread.fs != 0)
return task->thread.fs;
if (task == current)
asm("movl %%fs,%0" : "=r" (seg));
if (seg != FS_TLS_SEL)
return 0;
return get_desc_base(&task->thread.tls_array[FS_TLS]);
}
case offsetof(struct user_regs_struct, gs_base): {
/*
* Exactly the same here as the %fs handling above.
*/
unsigned int seg = task->thread.gsindex;
if (task->thread.gs != 0)
return task->thread.gs;
if (task == current)
asm("movl %%gs,%0" : "=r" (seg));
if (seg != GS_TLS_SEL)
return 0;
return get_desc_base(&task->thread.tls_array[GS_TLS]);
}
#endif
}
return *pt_regs_access(task_pt_regs(task), offset);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (kbuf) {
unsigned long *k = kbuf;
while (count >= sizeof(*k)) {
*k++ = getreg(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
unsigned long __user *u = ubuf;
while (count >= sizeof(*u)) {
if (__put_user(getreg(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
if (kbuf) {
const unsigned long *k = kbuf;
while (count >= sizeof(*k) && !ret) {
ret = putreg(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
while (count >= sizeof(*u) && !ret) {
unsigned long word;
ret = __get_user(word, u++);
if (ret)
break;
ret = putreg(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return ret;
}
static void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
int i;
struct thread_struct *thread = &(current->thread);
/*
* Store in the virtual DR6 register the fact that the breakpoint
* was hit so the thread's debugger will see it.
*/
for (i = 0; i < HBP_NUM; i++) {
if (thread->ptrace_bps[i] == bp)
break;
}
thread->debugreg6 |= (DR_TRAP0 << i);
}
/*
* Walk through every ptrace breakpoint for this thread and
* build the dr7 value on top of their attributes.
*/
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
int i;
int dr7 = 0;
struct arch_hw_breakpoint *info;
for (i = 0; i < HBP_NUM; i++) {
if (bp[i] && !bp[i]->attr.disabled) {
info = counter_arch_bp(bp[i]);
dr7 |= encode_dr7(i, info->len, info->type);
}
}
return dr7;
}
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
struct task_struct *tsk, int disabled)
{
int err;
int gen_len, gen_type;
struct perf_event_attr attr;
/*
* We should have at least an inactive breakpoint at this
* slot. It means the user is writing dr7 without having
* written the address register first
*/
if (!bp)
return -EINVAL;
err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
if (err)
return err;
attr = bp->attr;
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = disabled;
return modify_user_hw_breakpoint(bp, &attr);
}
/*
* Handle ptrace writes to debug register 7.
*/
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
struct thread_struct *thread = &(tsk->thread);
unsigned long old_dr7;
int i, orig_ret = 0, rc = 0;
int enabled, second_pass = 0;
unsigned len, type;
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
data &= ~DR_CONTROL_RESERVED;
old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
/*
* Loop through all the hardware breakpoints, making the
* appropriate changes to each.
*/
for (i = 0; i < HBP_NUM; i++) {
enabled = decode_dr7(data, i, &len, &type);
bp = thread->ptrace_bps[i];
if (!enabled) {
if (bp) {
/*
* Don't unregister the breakpoints right away,
* unless all register_user_hw_breakpoint()
* requests have succeeded. This prevents
* any window of opportunity for debug
* register grabbing by other users.
*/
if (!second_pass)
continue;
rc = ptrace_modify_breakpoint(bp, len, type,
tsk, 1);
if (rc)
break;
}
continue;
}
rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
if (rc)
break;
}
/*
* Make a second pass to free the remaining unused breakpoints
* or to restore the original breakpoints if an error occurred.
*/
if (!second_pass) {
second_pass = 1;
if (rc < 0) {
orig_ret = rc;
data = old_dr7;
}
goto restore;
}
ptrace_put_breakpoints(tsk);
return ((orig_ret < 0) ? orig_ret : rc);
}
/*
* Handle PTRACE_PEEKUSR calls for the debug register area.
*/
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
struct thread_struct *thread = &(tsk->thread);
unsigned long val = 0;
if (n < HBP_NUM) {
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
bp = thread->ptrace_bps[n];
if (!bp)
val = 0;
else
val = bp->hw.info.address;
ptrace_put_breakpoints(tsk);
} else if (n == 6) {
val = thread->debugreg6;
} else if (n == 7) {
val = thread->ptrace_dr7;
}
return val;
}
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
unsigned long addr)
{
struct perf_event *bp;
struct thread_struct *t = &tsk->thread;
struct perf_event_attr attr;
int err = 0;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
if (!t->ptrace_bps[nr]) {
ptrace_breakpoint_init(&attr);
/*
* Put stub len and type to register (reserve) an inactive but
* correct bp
*/
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
/*
* CHECKME: the previous code returned -EIO if the addr wasn't
* a valid task virtual addr. The new one will return -EINVAL in
* this case.
* -EINVAL may be what we want for in-kernel breakpoint users,
* but -EIO looks better for ptrace, since we refuse a register
* write for the user. And anyway this is the previous
* behaviour.
*/
if (IS_ERR(bp)) {
err = PTR_ERR(bp);
goto put;
}
t->ptrace_bps[nr] = bp;
} else {
bp = t->ptrace_bps[nr];
attr = bp->attr;
attr.bp_addr = addr;
err = modify_user_hw_breakpoint(bp, &attr);
}
put:
ptrace_put_breakpoints(tsk);
return err;
}
/*
* Handle PTRACE_POKEUSR calls for the debug register area.
*/
int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
{
struct thread_struct *thread = &(tsk->thread);
int rc = 0;
/* There are no DR4 or DR5 registers */
if (n == 4 || n == 5)
return -EIO;
if (n == 6) {
thread->debugreg6 = val;
goto ret_path;
}
if (n < HBP_NUM) {
rc = ptrace_set_breakpoint_addr(tsk, n, val);
if (rc)
return rc;
}
/* All that's left is DR7 */
if (n == 7) {
rc = ptrace_write_dr7(tsk, val);
if (!rc)
thread->ptrace_dr7 = val;
}
ret_path:
return rc;
}
/*
* These access the current or another (stopped) task's io permission
* bitmap for debugging or core dump.
*/
static int ioperm_active(struct task_struct *target,
const struct user_regset *regset)
{
return target->thread.io_bitmap_max / regset->size;
}
static int ioperm_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (!target->thread.io_bitmap_ptr)
return -ENXIO;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.io_bitmap_ptr,
0, IO_BITMAP_BYTES);
}
/*
* Called by kernel/ptrace.c when detaching.
*
* Make sure the single step bit is not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
tmp = 0; /* Default return condition */
if (addr < sizeof(struct user_regs_struct))
tmp = getreg(child, addr);
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
addr <= offsetof(struct user, u_debugreg[7])) {
addr -= offsetof(struct user, u_debugreg[0]);
tmp = ptrace_get_debugreg(child, addr / sizeof(data));
}
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
break;
if (addr < sizeof(struct user_regs_struct))
ret = putreg(child, addr, data);
else if (addr >= offsetof(struct user, u_debugreg[0]) &&
addr <= offsetof(struct user, u_debugreg[7])) {
addr -= offsetof(struct user, u_debugreg[0]);
ret = ptrace_set_debugreg(child,
addr / sizeof(data), data);
}
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, sizeof(struct user_regs_struct),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_i387_struct),
datap);
#ifdef CONFIG_X86_32
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0;
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_XFP,
0, sizeof(struct user_fxsr_struct),
datap) ? -EIO : 0;
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
case PTRACE_GET_THREAD_AREA:
if ((int) addr < 0)
return -EIO;
ret = do_get_thread_area(child, addr,
(struct user_desc __user *)data);
break;
case PTRACE_SET_THREAD_AREA:
if ((int) addr < 0)
return -EIO;
ret = do_set_thread_area(child, addr,
(struct user_desc __user *)data, 0);
break;
#endif
#ifdef CONFIG_X86_64
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
are reversed. */
case PTRACE_ARCH_PRCTL:
ret = do_arch_prctl(child, data, addr);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
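/*
* Illustrative tracer-side view (not part of this file): the
* PTRACE_PEEKUSR branch above is what services, e.g.,
*
*   errno = 0;
*   long ip = ptrace(PTRACE_PEEKUSER, pid,
*                    offsetof(struct user_regs_struct, rip), NULL);
*/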
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>
#define R32(l,q) \
case offsetof(struct user32, regs.l): \
regs->q = value; break
#define SEG32(rs) \
case offsetof(struct user32, regs.rs): \
return set_segment_reg(child, \
offsetof(struct user_regs_struct, rs), \
value); \
break
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
struct pt_regs *regs = task_pt_regs(child);
switch (regno) {
SEG32(cs);
SEG32(ds);
SEG32(es);
SEG32(fs);
SEG32(gs);
SEG32(ss);
R32(ebx, bx);
R32(ecx, cx);
R32(edx, dx);
R32(edi, di);
R32(esi, si);
R32(ebp, bp);
R32(eax, ax);
R32(eip, ip);
R32(esp, sp);
case offsetof(struct user32, regs.orig_eax):
/*
* A 32-bit debugger setting orig_eax means to restore
* the state of the task restarting a 32-bit syscall.
* Make sure we interpret the -ERESTART* codes correctly
* in case the task is not actually still sitting at the
* exit from a 32-bit syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
if (syscall_get_nr(child, regs) >= 0)
task_thread_info(child)->status |= TS_COMPAT;
break;
case offsetof(struct user32, regs.eflags):
return set_flags(child, value);
case offsetof(struct user32, u_debugreg[0]) ...
offsetof(struct user32, u_debugreg[7]):
regno -= offsetof(struct user32, u_debugreg[0]);
return ptrace_set_debugreg(child, regno / 4, value);
default:
if (regno > sizeof(struct user32) || (regno & 3))
return -EIO;
/*
* Other dummy fields in the virtual user structure
* are ignored
*/
break;
}
return 0;
}
#undef R32
#undef SEG32
#define R32(l,q) \
case offsetof(struct user32, regs.l): \
*val = regs->q; break
#define SEG32(rs) \
case offsetof(struct user32, regs.rs): \
*val = get_segment_reg(child, \
offsetof(struct user_regs_struct, rs)); \
break
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
struct pt_regs *regs = task_pt_regs(child);
switch (regno) {
SEG32(ds);
SEG32(es);
SEG32(fs);
SEG32(gs);
R32(cs, cs);
R32(ss, ss);
R32(ebx, bx);
R32(ecx, cx);
R32(edx, dx);
R32(edi, di);
R32(esi, si);
R32(ebp, bp);
R32(eax, ax);
R32(orig_eax, orig_ax);
R32(eip, ip);
R32(esp, sp);
case offsetof(struct user32, regs.eflags):
*val = get_flags(child);
break;
case offsetof(struct user32, u_debugreg[0]) ...
offsetof(struct user32, u_debugreg[7]):
regno -= offsetof(struct user32, u_debugreg[0]);
*val = ptrace_get_debugreg(child, regno / 4);
break;
default:
if (regno > sizeof(struct user32) || (regno & 3))
return -EIO;
/*
* Other dummy fields in the virtual user structure
* are ignored
*/
*val = 0;
break;
}
return 0;
}
#undef R32
#undef SEG32
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (kbuf) {
compat_ulong_t *k = kbuf;
while (count >= sizeof(*k)) {
getreg32(target, pos, k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
compat_ulong_t __user *u = ubuf;
while (count >= sizeof(*u)) {
compat_ulong_t word;
getreg32(target, pos, &word);
if (__put_user(word, u++))
return -EFAULT;
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return 0;
}
static int genregs32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
if (kbuf) {
const compat_ulong_t *k = kbuf;
while (count >= sizeof(*k) && !ret) {
ret = putreg32(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
while (count >= sizeof(*u) && !ret) {
compat_ulong_t word;
ret = __get_user(word, u++);
if (ret)
break;
ret = putreg32(target, pos, word);
count -= sizeof(*u);
pos += sizeof(*u);
}
}
return ret;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
void __user *datap = compat_ptr(data);
int ret;
__u32 val;
switch (request) {
case PTRACE_PEEKUSR:
ret = getreg32(child, addr, &val);
if (ret == 0)
ret = put_user(val, (__u32 __user *)datap);
break;
case PTRACE_POKEUSR:
ret = putreg32(child, addr, data);
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_GENERAL,
0, sizeof(struct user_regs_struct32),
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_GENERAL, 0,
sizeof(struct user_regs_struct32),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_FP, 0,
sizeof(struct user_i387_ia32_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(
child, &user_x86_32_view, REGSET_FP,
0, sizeof(struct user_i387_ia32_struct), datap);
case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
return copy_regset_to_user(child, &user_x86_32_view,
REGSET_XFP, 0,
sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
return copy_regset_from_user(child, &user_x86_32_view,
REGSET_XFP, 0,
sizeof(struct user32_fxsr_struct),
datap);
case PTRACE_GET_THREAD_AREA:
case PTRACE_SET_THREAD_AREA:
return arch_ptrace(child, request, addr, data);
default:
return compat_ptrace_request(child, request, addr, data);
}
return ret;
}
#endif /* CONFIG_IA32_EMULATION */
#ifdef CONFIG_X86_64
static struct user_regset x86_64_regsets[] __read_mostly = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.get = genregs_get, .set = genregs_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_i387_struct) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
},
[REGSET_XSTATE] = {
.core_note_type = NT_X86_XSTATE,
.size = sizeof(u64), .align = sizeof(u64),
.active = xstateregs_active, .get = xstateregs_get,
.set = xstateregs_set
},
[REGSET_IOPERM64] = {
.core_note_type = NT_386_IOPERM,
.n = IO_BITMAP_LONGS,
.size = sizeof(long), .align = sizeof(long),
.active = ioperm_active, .get = ioperm_get
},
};
static const struct user_regset_view user_x86_64_view = {
.name = "x86_64", .e_machine = EM_X86_64,
.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
#else /* CONFIG_X86_32 */
#define user_regs_struct32 user_regs_struct
#define genregs32_get genregs_get
#define genregs32_set genregs_set
#define user_i387_ia32_struct user_i387_struct
#define user32_fxsr_struct user_fxsr_struct
#endif /* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.get = genregs32_get, .set = genregs32_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
},
[REGSET_XSTATE] = {
.core_note_type = NT_X86_XSTATE,
.size = sizeof(u64), .align = sizeof(u64),
.active = xstateregs_active, .get = xstateregs_get,
.set = xstateregs_set
},
[REGSET_TLS] = {
.core_note_type = NT_386_TLS,
.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
.size = sizeof(struct user_desc),
.align = sizeof(struct user_desc),
.active = regset_tls_active,
.get = regset_tls_get, .set = regset_tls_set
},
[REGSET_IOPERM32] = {
.core_note_type = NT_386_IOPERM,
.n = IO_BITMAP_BYTES / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = ioperm_active, .get = ioperm_get
},
};
static const struct user_regset_view user_x86_32_view = {
.name = "i386", .e_machine = EM_386,
.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif
/*
* This represents bytes 464..511 in the memory layout exported through
* the REGSET_XSTATE interface.
*/
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
return &user_x86_64_view;
#endif
}
static void fill_sigtrap_info(struct task_struct *tsk,
struct pt_regs *regs,
int error_code, int si_code,
struct siginfo *info)
{
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = si_code;
info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs,
struct siginfo *info)
{
fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code)
{
struct siginfo info;
fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
#ifdef CONFIG_X86_32
# define IS_IA32 1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32 is_compat_task()
#else
# define IS_IA32 0
#endif
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
/*
* If we stepped into a sysenter/syscall insn, it trapped in
* kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
* If user-mode had set TF itself, then it's still clear from
* do_debug() and we need to set it again to restore the user
* state. If we entered on the slow path, TF was already set.
*/
if (test_thread_flag(TIF_SINGLESTEP))
regs->flags |= X86_EFLAGS_TF;
/* do the secure computing check first */
secure_computing(regs->orig_ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
ret = -1L;
if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
tracehook_report_syscall_entry(regs))
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->orig_ax);
if (unlikely(current->audit_context)) {
if (IS_IA32)
audit_syscall_entry(AUDIT_ARCH_I386,
regs->orig_ax,
regs->bx, regs->cx,
regs->dx, regs->si);
#ifdef CONFIG_X86_64
else
audit_syscall_entry(AUDIT_ARCH_X86_64,
regs->orig_ax,
regs->di, regs->si,
regs->dx, regs->r10);
#endif
}
return ret ?: regs->orig_ax;
}
void syscall_trace_leave(struct pt_regs *regs)
{
bool step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
* syscall_trace_enter().
*/
step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
!test_thread_flag(TIF_SYSCALL_EMU);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
|
173,735,285,776,580 | /*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
#include <linux/magic.h> /* STACK_END_MAGIC */
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/module.h> /* search_exception_table */
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
/*
* Page fault error code bits:
*
* bit 0 == 0: no page found 1: protection fault
* bit 1 == 0: read access 1: write access
* bit 2 == 0: kernel-mode access 1: user-mode access
* bit 3 == 1: use of reserved bit detected
* bit 4 == 1: fault was an instruction fetch
*/
enum x86_pf_error_code {
PF_PROT = 1 << 0,
PF_WRITE = 1 << 1,
PF_USER = 1 << 2,
PF_RSVD = 1 << 3,
PF_INSTR = 1 << 4,
};
/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
if (unlikely(is_kmmio_active()))
if (kmmio_handler(regs, addr) == 1)
return -1;
return 0;
}
static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode_vm(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Prefetch quirks:
*
* 32-bit mode:
*
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
* Check that here and ignore it.
*
* 64-bit mode:
*
* Sometimes the CPU reports invalid exceptions on prefetch.
* Check that here and ignore it.
*
* Opcode checker based on code by Richard Brunner.
*/
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
unsigned char opcode, int *prefetch)
{
unsigned char instr_hi = opcode & 0xf0;
unsigned char instr_lo = opcode & 0x0f;
switch (instr_hi) {
case 0x20:
case 0x30:
/*
* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
* In X86_64 long mode, the CPU will signal invalid
* opcode if some of these prefixes are present so
* X86_64 will never get here anyway
*/
return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
case 0x40:
/*
* In AMD64 long mode 0x40..0x4F are valid REX prefixes
* Need to figure out under what instruction mode the
* instruction was issued. Could check the LDT for lm,
* but for now it's good enough to assume that long
* mode only uses well known segments or kernel.
*/
return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
case 0x60:
/* 0x64 thru 0x67 are valid prefixes in all modes. */
return (instr_lo & 0xC) == 0x4;
case 0xF0:
/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
return !instr_lo || (instr_lo>>1) == 1;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
if (probe_kernel_address(instr, opcode))
return 0;
*prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
return 0;
default:
return 0;
}
}
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
unsigned char *max_instr;
unsigned char *instr;
int prefetch = 0;
/*
* If it was an exec (instruction fetch) fault on NX page, then
* do not ignore the fault:
*/
if (error_code & PF_INSTR)
return 0;
instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
return 0;
while (instr < max_instr) {
unsigned char opcode;
if (probe_kernel_address(instr, opcode))
break;
instr++;
if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
break;
}
return prefetch;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
struct task_struct *tsk, int fault)
{
unsigned lsb = 0;
siginfo_t info;
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
force_sig_info(si_signo, &info, tsk);
}
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd += index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
return NULL;
/*
* set_pgd(pgd, *pgd_k); here would be useless on PAE
* and redundant with the set_pmd() on non-PAE. As would
* set_pud.
*/
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
return NULL;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k);
else
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
return pmd_k;
}
void vmalloc_sync_all(void)
{
unsigned long address;
if (SHARED_KERNEL_PMD)
return;
for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE && address < FIXADDR_TOP;
address += PMD_SIZE) {
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
pmd_t *ret;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
ret = vmalloc_sync_one(page_address(page), address);
spin_unlock(pgt_lock);
if (!ret)
break;
}
spin_unlock(&pgd_lock);
}
}
/*
* 32-bit:
*
* Handle a fault on the vmalloc or module mapping area
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
unsigned long pgd_paddr;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc area: */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
WARN_ON_ONCE(in_nmi());
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch..
*/
pgd_paddr = read_cr3();
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
return 0;
}
/*
* Did it hit the DOS screen memory VA from vm86 mode?
*/
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
unsigned long bit;
if (!v8086_mode(regs))
return;
bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
tsk->thread.screen_bitmap |= 1 << bit;
}
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3());
pgd_t *pgd = &base[pgd_index(address)];
pmd_t *pmd;
pte_t *pte;
#ifdef CONFIG_X86_PAE
printk("*pdpt = %016Lx ", pgd_val(*pgd));
if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
goto out;
#endif
pmd = pmd_offset(pud_offset(pgd, address), address);
printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
/*
* We must not directly access the pte in the highpte
* case if the page table is located in highmem.
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
printk("\n");
}
#else /* CONFIG_X86_64: */
void vmalloc_sync_all(void)
{
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}
/*
* 64-bit:
*
* Handle a fault on the vmalloc area
*
* This assumes no large pages in there.
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
pgd_t *pgd, *pgd_ref;
pud_t *pud, *pud_ref;
pmd_t *pmd, *pmd_ref;
pte_t *pte, *pte_ref;
/* Make sure we are in vmalloc area: */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
WARN_ON_ONCE(in_nmi());
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the latter
* case just flush:
*/
pgd = pgd_offset(current->active_mm, address);
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
/*
* Below here mismatches are bugs because these lower tables
* are shared:
*/
pud = pud_offset(pgd, address);
pud_ref = pud_offset(pgd_ref, address);
if (pud_none(*pud_ref))
return -1;
if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
BUG();
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
if (pmd_none(*pmd_ref))
return -1;
if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
BUG();
pte_ref = pte_offset_kernel(pmd_ref, address);
if (!pte_present(*pte_ref))
return -1;
pte = pte_offset_kernel(pmd, address);
/*
* Don't use pte_page here, because the mappings can point
* outside mem_map, and the NUMA hash lookup cannot handle
* that:
*/
if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
return 0;
}
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
/*
* No vm86 mode in 64-bit mode:
*/
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
}
static int bad_address(void *p)
{
unsigned long dummy;
return probe_kernel_address((unsigned long *)p, dummy);
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
pgd_t *pgd = base + pgd_index(address);
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (bad_address(pgd))
goto bad;
printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (bad_address(pud))
goto bad;
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (bad_address(pmd))
goto bad;
printk("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte))
goto bad;
printk("PTE %lx", pte_val(*pte));
out:
printk("\n");
return;
bad:
printk("BAD\n");
}
#endif /* CONFIG_X86_64 */
/*
* Workaround for K8 erratum #93 & buggy BIOS.
*
* BIOS SMM functions are required to use a specific workaround
* to avoid corruption of the 64bit RIP register on C stepping K8.
*
* A lot of BIOS that didn't get tested properly miss this.
*
* The OS sees this as a page fault with the upper 32bits of RIP cleared.
* Try to work around it here.
*
* Note we only handle faults in kernel here.
* Does nothing on 32-bit.
*/
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
if (address != regs->ip)
return 0;
if ((address >> 32) != 0)
return 0;
address |= 0xffffffffUL << 32;
if ((address >= (u64)_stext && address <= (u64)_etext) ||
(address >= MODULES_VADDR && address <= MODULES_END)) {
printk_once(errata93_warning);
regs->ip = address;
return 1;
}
#endif
return 0;
}
/*
* Work around K8 erratum #100 K8 in compat mode occasionally jumps
* to illegal addresses >4GB.
*
* We catch this in the page fault handler because these addresses
* are not reachable. Just detect this case and return. Any code
* segment in LDT is compatibility mode.
*/
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
return 1;
#endif
return 0;
}
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
unsigned long nr;
/*
* Pentium F0 0F C7 C8 bug workaround:
*/
if (boot_cpu_data.f00f_bug) {
nr = (address - idt_descr.address) >> 3;
if (nr == 6) {
do_invalid_op(regs, 0);
return 1;
}
}
#endif
return 0;
}
static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
if (!oops_may_print())
return;
if (error_code & PF_INSTR) {
unsigned int level;
pte_t *pte = lookup_address(address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
printk(nx_warning, current_uid());
}
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
else
printk(KERN_CONT "paging request");
printk(KERN_CONT " at %p\n", (void *) address);
printk(KERN_ALERT "IP:");
printk_address(regs->ip, 1);
dump_pagetable(address);
}
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk;
unsigned long flags;
int sig;
flags = oops_begin();
tsk = current;
sig = SIGKILL;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
if (__die("Bad pagetable", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
}
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk = current;
unsigned long *stackend;
unsigned long flags;
int sig;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* 32-bit:
*
* Valid to do another page fault here, because if this fault
* had been triggered by is_prefetch fixup_exception would have
* handled it.
*
* 64-bit:
*
* Hall of shame of CPU/BIOS bugs.
*/
if (is_prefetch(regs, error_code, address))
return;
if (is_errata93(regs, address))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice:
*/
flags = oops_begin();
show_fault_oops(regs, error_code, address);
stackend = end_of_stack(tsk);
if (tsk != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
sig = SIGKILL;
if (__die("Oops", regs, error_code))
sig = 0;
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
oops_end(flags, regs, sig);
}
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
*/
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, SIGSEGV))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->ip, (void *)regs->sp, error_code);
print_vma_addr(KERN_CONT " in ", regs->ip);
printk(KERN_CONT "\n");
}
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
if (error_code & PF_USER) {
/*
* It's possible to have interrupts off here:
*/
local_irq_enable();
/*
* Valid to do another page fault here because this one came
* from user space:
*/
if (is_prefetch(regs, error_code, address))
return;
if (is_errata100(regs, address))
return;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
/* Kernel addresses are always protection faults: */
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
tsk->thread.trap_no = 14;
force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
return;
}
if (is_f00f_bug(regs, address))
return;
no_context(regs, error_code, address);
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct mm_struct *mm = current->mm;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
up_read(&mm->mmap_sem);
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_MAPERR);
}
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed):
*/
up_read(&current->mm->mmap_sem);
pagefault_out_of_memory();
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int code = BUS_ADRERR;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
no_context(regs, error_code, address);
return;
}
/* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
return;
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 14;
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
code = BUS_MCEERR_AR;
}
#endif
force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
/*
* Pagefault was interrupted by SIGKILL. We have no reason to
* continue pagefault.
*/
if (fatal_signal_pending(current)) {
if (!(fault & VM_FAULT_RETRY))
up_read(&current->mm->mmap_sem);
if (!(error_code & PF_USER))
no_context(regs, error_code, address);
return 1;
}
if (!(fault & VM_FAULT_ERROR))
return 0;
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
return 1;
}
out_of_memory(regs, error_code, address);
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else
BUG();
}
return 1;
}
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & PF_WRITE) && !pte_write(*pte))
return 0;
if ((error_code & PF_INSTR) && !pte_exec(*pte))
return 0;
return 1;
}
/*
* Handle a spurious fault caused by a stale TLB entry.
*
* This allows us to lazily refresh the TLB when increasing the
* permissions of a kernel page (RO -> RW or NX -> X). Doing it
* eagerly is very expensive since that implies doing a full
* cross-processor TLB flush, even if no stale TLB entries exist
* on other processors.
*
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
*/
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
/* Reserved-bit violation or user access to kernel space? */
if (error_code & (PF_USER | PF_RSVD))
return 0;
pgd = init_mm.pgd + pgd_index(address);
if (!pgd_present(*pgd))
return 0;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return spurious_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
return spurious_fault_check(error_code, (pte_t *) pmd);
/*
* Note: don't use pte_present() here, since it returns true
* if the _PAGE_PROTNONE bit is set. However, this aliases the
* _PAGE_GLOBAL bit, which for kernel pages give false positives
* when CONFIG_DEBUG_PAGEALLOC is used.
*/
pte = pte_offset_kernel(pmd, address);
if (!(pte_flags(*pte) & _PAGE_PRESENT))
return 0;
ret = spurious_fault_check(error_code, pte);
if (!ret)
return 0;
/*
* Make sure we have permissions in PMD.
* If not, then there's a bug in the page tables:
*/
ret = spurious_fault_check(error_code, (pte_t *) pmd);
WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
return ret;
}
int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
return 0;
}
/* read, present: */
if (unlikely(error_code & PF_PROT))
return 1;
/* read, not present: */
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
return 1;
return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
return address >= TASK_SIZE_MAX;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
int fault;
int write = error_code & PF_WRITE;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
/* Get the faulting address: */
address = read_cr2();
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
if (kmemcheck_fault(regs, address, error_code))
return;
}
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, error_code, address);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (unlikely(notify_page_fault(regs)))
return;
/*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
if (user_mode_vm(regs)) {
local_irq_enable();
error_code |= PF_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/*
* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in
* the kernel and should generate an OOPS. Unfortunately, in the
* case of an erroneous fault occurring in a code path which already
* holds mmap_sem we will deadlock attempting to validate the fault
* against the address space. Luckily the kernel only validly
* references user space from well defined areas of code, which are
* listed in the exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a
* deadlock. Attempt to lock the address space, if we cannot we then
* validate the source. If this is invalid we can skip the address
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
retry:
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case we'll have missed the might_sleep() from
* down_read():
*/
might_sleep();
}
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
bad_area(regs, error_code, address);
return;
}
if (error_code & PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
* and pusha to work. ("enter $65535, $31" pushes
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
bad_area(regs, error_code, address);
return;
}
}
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (mm_fault_error(regs, error_code, address, fault))
return;
}
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
check_v8086_mode(regs, address, tsk);
up_read(&mm->mmap_sem);
}
| /*
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
* Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
*/
#include <linux/magic.h> /* STACK_END_MAGIC */
#include <linux/sched.h> /* test_thread_flag(), ... */
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/module.h> /* search_exception_table */
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
/*
* Page fault error code bits:
*
* bit 0 == 0: no page found 1: protection fault
* bit 1 == 0: read access 1: write access
* bit 2 == 0: kernel-mode access 1: user-mode access
* bit 3 == 1: use of reserved bit detected
* bit 4 == 1: fault was an instruction fetch
*/
enum x86_pf_error_code {
PF_PROT = 1 << 0,
PF_WRITE = 1 << 1,
PF_USER = 1 << 2,
PF_RSVD = 1 << 3,
PF_INSTR = 1 << 4,
};
/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
if (unlikely(is_kmmio_active()))
if (kmmio_handler(regs, addr) == 1)
return -1;
return 0;
}
static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (kprobes_built_in() && !user_mode_vm(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 14))
ret = 1;
preempt_enable();
}
return ret;
}
/*
* Prefetch quirks:
*
* 32-bit mode:
*
* Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
* Check that here and ignore it.
*
* 64-bit mode:
*
* Sometimes the CPU reports invalid exceptions on prefetch.
* Check that here and ignore it.
*
* Opcode checker based on code by Richard Brunner.
*/
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
unsigned char opcode, int *prefetch)
{
unsigned char instr_hi = opcode & 0xf0;
unsigned char instr_lo = opcode & 0x0f;
switch (instr_hi) {
case 0x20:
case 0x30:
/*
* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
* In X86_64 long mode, the CPU will signal invalid
* opcode if some of these prefixes are present so
* X86_64 will never get here anyway
*/
return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
case 0x40:
/*
* In AMD64 long mode 0x40..0x4F are valid REX prefixes
* Need to figure out under what instruction mode the
* instruction was issued. Could check the LDT for lm,
* but for now it's good enough to assume that long
* mode only uses well known segments or kernel.
*/
return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
case 0x60:
/* 0x64 thru 0x67 are valid prefixes in all modes. */
return (instr_lo & 0xC) == 0x4;
case 0xF0:
/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
return !instr_lo || (instr_lo>>1) == 1;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
if (probe_kernel_address(instr, opcode))
return 0;
*prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
return 0;
default:
return 0;
}
}
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
unsigned char *max_instr;
unsigned char *instr;
int prefetch = 0;
/*
* If it was an exec (instruction fetch) fault on NX page, then
* do not ignore the fault:
*/
if (error_code & PF_INSTR)
return 0;
instr = (void *)convert_ip_to_linear(current, regs);
max_instr = instr + 15;
if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
return 0;
while (instr < max_instr) {
unsigned char opcode;
if (probe_kernel_address(instr, opcode))
break;
instr++;
if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
break;
}
return prefetch;
}
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
struct task_struct *tsk, int fault)
{
unsigned lsb = 0;
siginfo_t info;
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;
force_sig_info(si_signo, &info, tsk);
}
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd += index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
return NULL;
/*
* set_pgd(pgd, *pgd_k); here would be useless on PAE
* and redundant with the set_pmd() on non-PAE. As would
* set_pud.
*/
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
return NULL;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k);
else
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
return pmd_k;
}
void vmalloc_sync_all(void)
{
unsigned long address;
if (SHARED_KERNEL_PMD)
return;
for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE && address < FIXADDR_TOP;
address += PMD_SIZE) {
struct page *page;
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
pmd_t *ret;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
ret = vmalloc_sync_one(page_address(page), address);
spin_unlock(pgt_lock);
if (!ret)
break;
}
spin_unlock(&pgd_lock);
}
}
/*
* 32-bit:
*
* Handle a fault on the vmalloc or module mapping area
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
unsigned long pgd_paddr;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc area: */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
WARN_ON_ONCE(in_nmi());
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch..
*/
pgd_paddr = read_cr3();
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
return 0;
}
/*
* Did it hit the DOS screen memory VA from vm86 mode?
*/
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
unsigned long bit;
if (!v8086_mode(regs))
return;
bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
tsk->thread.screen_bitmap |= 1 << bit;
}
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3());
pgd_t *pgd = &base[pgd_index(address)];
pmd_t *pmd;
pte_t *pte;
#ifdef CONFIG_X86_PAE
printk("*pdpt = %016Lx ", pgd_val(*pgd));
if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
goto out;
#endif
pmd = pmd_offset(pud_offset(pgd, address), address);
printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
/*
* We must not directly access the pte in the highpte
* case if the page table is located in highmem.
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
printk("\n");
}
#else /* CONFIG_X86_64: */
void vmalloc_sync_all(void)
{
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}
/*
* 64-bit:
*
* Handle a fault on the vmalloc area
*
* This assumes no large pages in there.
*/
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
pgd_t *pgd, *pgd_ref;
pud_t *pud, *pud_ref;
pmd_t *pmd, *pmd_ref;
pte_t *pte, *pte_ref;
/* Make sure we are in vmalloc area: */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
WARN_ON_ONCE(in_nmi());
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the latter
* case just flush:
*/
pgd = pgd_offset(current->active_mm, address);
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
/*
* Below here mismatches are bugs because these lower tables
* are shared:
*/
pud = pud_offset(pgd, address);
pud_ref = pud_offset(pgd_ref, address);
if (pud_none(*pud_ref))
return -1;
if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
BUG();
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
if (pmd_none(*pmd_ref))
return -1;
if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
BUG();
pte_ref = pte_offset_kernel(pmd_ref, address);
if (!pte_present(*pte_ref))
return -1;
pte = pte_offset_kernel(pmd, address);
/*
* Don't use pte_page here, because the mappings can point
* outside mem_map, and the NUMA hash lookup cannot handle
* that:
*/
if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
return 0;
}
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
/*
* No vm86 mode in 64-bit mode:
*/
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
struct task_struct *tsk)
{
}
static int bad_address(void *p)
{
unsigned long dummy;
return probe_kernel_address((unsigned long *)p, dummy);
}
static void dump_pagetable(unsigned long address)
{
pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
pgd_t *pgd = base + pgd_index(address);
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (bad_address(pgd))
goto bad;
printk("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (bad_address(pud))
goto bad;
printk("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (bad_address(pmd))
goto bad;
printk("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_large(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte))
goto bad;
printk("PTE %lx", pte_val(*pte));
out:
printk("\n");
return;
bad:
printk("BAD\n");
}
#endif /* CONFIG_X86_64 */
/*
* Workaround for K8 erratum #93 & buggy BIOS.
*
* BIOS SMM functions are required to use a specific workaround
* to avoid corruption of the 64bit RIP register on C stepping K8.
*
* A lot of BIOS that didn't get tested properly miss this.
*
* The OS sees this as a page fault with the upper 32bits of RIP cleared.
* Try to work around it here.
*
* Note we only handle faults in kernel here.
* Does nothing on 32-bit.
*/
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
if (address != regs->ip)
return 0;
if ((address >> 32) != 0)
return 0;
address |= 0xffffffffUL << 32;
if ((address >= (u64)_stext && address <= (u64)_etext) ||
(address >= MODULES_VADDR && address <= MODULES_END)) {
printk_once(errata93_warning);
regs->ip = address;
return 1;
}
#endif
return 0;
}
/*
* Work around K8 erratum #100 K8 in compat mode occasionally jumps
* to illegal addresses >4GB.
*
* We catch this in the page fault handler because these addresses
* are not reachable. Just detect this case and return. Any code
* segment in LDT is compatibility mode.
*/
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
return 1;
#endif
return 0;
}
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
unsigned long nr;
/*
* Pentium F0 0F C7 C8 bug workaround:
*/
if (boot_cpu_data.f00f_bug) {
nr = (address - idt_descr.address) >> 3;
if (nr == 6) {
do_invalid_op(regs, 0);
return 1;
}
}
#endif
return 0;
}
static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
if (!oops_may_print())
return;
if (error_code & PF_INSTR) {
unsigned int level;
pte_t *pte = lookup_address(address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
printk(nx_warning, current_uid());
}
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
else
printk(KERN_CONT "paging request");
printk(KERN_CONT " at %p\n", (void *) address);
printk(KERN_ALERT "IP:");
printk_address(regs->ip, 1);
dump_pagetable(address);
}
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk;
unsigned long flags;
int sig;
flags = oops_begin();
tsk = current;
sig = SIGKILL;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
tsk->comm, address);
dump_pagetable(address);
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
if (__die("Bad pagetable", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
}
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
struct task_struct *tsk = current;
unsigned long *stackend;
unsigned long flags;
int sig;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* 32-bit:
*
* Valid to do another page fault here, because if this fault
* had been triggered by is_prefetch fixup_exception would have
* handled it.
*
* 64-bit:
*
* Hall of shame of CPU/BIOS bugs.
*/
if (is_prefetch(regs, error_code, address))
return;
if (is_errata93(regs, address))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice:
*/
flags = oops_begin();
show_fault_oops(regs, error_code, address);
stackend = end_of_stack(tsk);
if (tsk != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
tsk->thread.cr2 = address;
tsk->thread.trap_no = 14;
tsk->thread.error_code = error_code;
sig = SIGKILL;
if (__die("Oops", regs, error_code))
sig = 0;
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
oops_end(flags, regs, sig);
}
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
*/
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct task_struct *tsk)
{
if (!unhandled_signal(tsk, SIGSEGV))
return;
if (!printk_ratelimit())
return;
printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
(void *)regs->ip, (void *)regs->sp, error_code);
print_vma_addr(KERN_CONT " in ", regs->ip);
printk(KERN_CONT "\n");
}
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
if (error_code & PF_USER) {
/*
* It's possible to have interrupts off here:
*/
local_irq_enable();
/*
* Valid to do another page fault here because this one came
* from user space:
*/
if (is_prefetch(regs, error_code, address))
return;
if (is_errata100(regs, address))
return;
if (unlikely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
/* Kernel addresses are always protection faults: */
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
tsk->thread.trap_no = 14;
force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
return;
}
if (is_f00f_bug(regs, address))
return;
no_context(regs, error_code, address);
}
static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}
static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int si_code)
{
struct mm_struct *mm = current->mm;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
up_read(&mm->mmap_sem);
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_MAPERR);
}
static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
__bad_area(regs, error_code, address, SEGV_ACCERR);
}
/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed):
*/
up_read(&current->mm->mmap_sem);
pagefault_out_of_memory();
}
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
unsigned int fault)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int code = BUS_ADRERR;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
no_context(regs, error_code, address);
return;
}
/* User-space => ok to do another page fault: */
if (is_prefetch(regs, error_code, address))
return;
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 14;
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
tsk->comm, tsk->pid, address);
code = BUS_MCEERR_AR;
}
#endif
force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
/*
* Pagefault was interrupted by SIGKILL. We have no reason to
* continue pagefault.
*/
if (fatal_signal_pending(current)) {
if (!(fault & VM_FAULT_RETRY))
up_read(&current->mm->mmap_sem);
if (!(error_code & PF_USER))
no_context(regs, error_code, address);
return 1;
}
if (!(fault & VM_FAULT_ERROR))
return 0;
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
return 1;
}
out_of_memory(regs, error_code, address);
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else
BUG();
}
return 1;
}
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
if ((error_code & PF_WRITE) && !pte_write(*pte))
return 0;
if ((error_code & PF_INSTR) && !pte_exec(*pte))
return 0;
return 1;
}
/*
* Handle a spurious fault caused by a stale TLB entry.
*
* This allows us to lazily refresh the TLB when increasing the
* permissions of a kernel page (RO -> RW or NX -> X). Doing it
* eagerly is very expensive since that implies doing a full
* cross-processor TLB flush, even if no stale TLB entries exist
* on other processors.
*
* There are no security implications to leaving a stale TLB when
* increasing the permissions on a page.
*/
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret;
/* Reserved-bit violation or user access to kernel space? */
if (error_code & (PF_USER | PF_RSVD))
return 0;
pgd = init_mm.pgd + pgd_index(address);
if (!pgd_present(*pgd))
return 0;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return spurious_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
return spurious_fault_check(error_code, (pte_t *) pmd);
/*
* Note: don't use pte_present() here, since it returns true
* if the _PAGE_PROTNONE bit is set. However, this aliases the
* _PAGE_GLOBAL bit, which for kernel pages give false positives
* when CONFIG_DEBUG_PAGEALLOC is used.
*/
pte = pte_offset_kernel(pmd, address);
if (!(pte_flags(*pte) & _PAGE_PRESENT))
return 0;
ret = spurious_fault_check(error_code, pte);
if (!ret)
return 0;
/*
* Make sure we have permissions in PMD.
* If not, then there's a bug in the page tables:
*/
ret = spurious_fault_check(error_code, (pte_t *) pmd);
WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
return ret;
}
int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
return 0;
}
/* read, present: */
if (unlikely(error_code & PF_PROT))
return 1;
/* read, not present: */
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
return 1;
return 0;
}
static int fault_in_kernel_space(unsigned long address)
{
return address >= TASK_SIZE_MAX;
}
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
int fault;
int write = error_code & PF_WRITE;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
/* Get the faulting address: */
address = read_cr2();
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
if (kmemcheck_fault(regs, address, error_code))
return;
}
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, error_code, address);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (unlikely(notify_page_fault(regs)))
return;
/*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
if (user_mode_vm(regs)) {
local_irq_enable();
error_code |= PF_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/*
* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in
* the kernel and should generate an OOPS. Unfortunately, in the
* case of an erroneous fault occurring in a code path which already
* holds mmap_sem we will deadlock attempting to validate the fault
* against the address space. Luckily the kernel only validly
* references user space from well defined areas of code, which are
* listed in the exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a
* deadlock. Attempt to lock the address space, if we cannot we then
* validate the source. If this is invalid we can skip the address
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
retry:
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case we'll have missed the might_sleep() from
* down_read():
*/
might_sleep();
}
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
bad_area(regs, error_code, address);
return;
}
if (error_code & PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
* and pusha to work. ("enter $65535, $31" pushes
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
bad_area(regs, error_code, address);
return;
}
}
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (mm_fault_error(regs, error_code, address, fault))
return;
}
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
check_v8086_mode(regs, address, tsk);
up_read(&mm->mmap_sem);
}
|
75,640,770,614,192 | /*
* Performance events:
*
* Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
* Data type definitions, declarations, prototypes.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H
#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
* User-space ABI bits:
*/
/*
* attr.type
*/
enum perf_type_id {
PERF_TYPE_HARDWARE = 0,
PERF_TYPE_SOFTWARE = 1,
PERF_TYPE_TRACEPOINT = 2,
PERF_TYPE_HW_CACHE = 3,
PERF_TYPE_RAW = 4,
PERF_TYPE_BREAKPOINT = 5,
PERF_TYPE_MAX, /* non-ABI */
};
/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
*/
enum perf_hw_id {
/*
* Common hardware events, generalized by the kernel:
*/
PERF_COUNT_HW_CPU_CYCLES = 0,
PERF_COUNT_HW_INSTRUCTIONS = 1,
PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_MISSES = 3,
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
PERF_COUNT_HW_MAX, /* non-ABI */
};
/*
* Generalized hardware cache events:
*
* { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
* { read, write, prefetch } x
* { accesses, misses }
*/
enum perf_hw_cache_id {
PERF_COUNT_HW_CACHE_L1D = 0,
PERF_COUNT_HW_CACHE_L1I = 1,
PERF_COUNT_HW_CACHE_LL = 2,
PERF_COUNT_HW_CACHE_DTLB = 3,
PERF_COUNT_HW_CACHE_ITLB = 4,
PERF_COUNT_HW_CACHE_BPU = 5,
PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};
enum perf_hw_cache_op_id {
PERF_COUNT_HW_CACHE_OP_READ = 0,
PERF_COUNT_HW_CACHE_OP_WRITE = 1,
PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};
enum perf_hw_cache_op_result_id {
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};
/*
* Special "software" events provided by the kernel, even if the hardware
* does not support performance events. These events measure various
* physical and sw events of the kernel (and allow the profiling of them as
* well):
*/
enum perf_sw_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
PERF_COUNT_SW_CPU_MIGRATIONS = 4,
PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_MAX, /* non-ABI */
};
/*
* Bits that can be set in attr.sample_type to request information
* in the overflow packets.
*/
enum perf_event_sample_format {
PERF_SAMPLE_IP = 1U << 0,
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
/*
* The format of the data returned by read() on a perf event fd,
* as specified by attr.read_format:
*
* struct read_format {
* { u64 value;
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
*/
enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
/*
* Hardware event_id to monitor via a performance monitoring event:
*/
struct perf_event_attr {
/*
* Major type: hardware/software/tracepoint/etc.
*/
__u32 type;
/*
* Size of the attr structure, for fwd/bwd compat.
*/
__u32 size;
/*
* Type specific configuration information.
*/
__u64 config;
union {
__u64 sample_period;
__u64 sample_freq;
};
__u64 sample_type;
__u64 read_format;
__u64 disabled : 1, /* off by default */
inherit : 1, /* children inherit it */
pinned : 1, /* must always be on PMU */
exclusive : 1, /* only group on PMU */
exclude_user : 1, /* don't count user */
exclude_kernel : 1, /* ditto kernel */
exclude_hv : 1, /* ditto hypervisor */
exclude_idle : 1, /* don't count when idle */
mmap : 1, /* include mmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
task : 1, /* trace fork/exit */
watermark : 1, /* wakeup_watermark */
/*
* precise_ip:
*
* 0 - SAMPLE_IP can have arbitrary skid
* 1 - SAMPLE_IP must have constant skid
* 2 - SAMPLE_IP requested to have 0 skid
* 3 - SAMPLE_IP must have 0 skid
*
* See also PERF_RECORD_MISC_EXACT_IP
*/
precise_ip : 2, /* skid constraint */
mmap_data : 1, /* non-exec mmap data */
sample_id_all : 1, /* sample_type all events */
__reserved_1 : 45;
union {
__u32 wakeup_events; /* wakeup every n events */
__u32 wakeup_watermark; /* bytes before wakeup */
};
__u32 bp_type;
union {
__u64 bp_addr;
__u64 config1; /* extension of config */
};
union {
__u64 bp_len;
__u64 config2; /* extension of config1 */
};
};
/*
* Ioctls that can be done on a perf event fd:
*/
#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
};
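/*
 * Illustrative sketch, not part of this header: how user-space might use
 * the attr layout and the ioctls above to count one hardware event for the
 * calling thread. glibc provides no wrapper for the syscall, so a raw
 * syscall() stub is assumed; the function names below are hypothetical.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
static int example_count_instructions(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;
	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);		/* fwd/bwd compat, see above */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;			/* enabled explicitly below */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any cpu */
	if (fd < 0)
		return -1;
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));	/* default read_format: one u64 */
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}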
/*
* Structure of the page that can be mapped via mmap
*/
struct perf_event_mmap_page {
__u32 version; /* version number of this structure */
__u32 compat_version; /* lowest version this is compat with */
/*
* Bits needed to read the hw events in user-space.
*
* u32 seq;
* s64 count;
*
* do {
* seq = pc->lock;
*
* barrier()
* if (pc->index) {
* count = pmc_read(pc->index - 1);
* count += pc->offset;
* } else
* goto regular_read;
*
* barrier();
* } while (pc->lock != seq);
*
* NOTE: for obvious reasons this only works on self-monitoring
* processes.
*/
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware event identifier */
__s64 offset; /* add to hardware event value */
__u64 time_enabled; /* time event active */
__u64 time_running; /* time event on cpu */
/*
* Hole for extension of the self monitor capabilities
*/
__u64 __reserved[123]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
*
* User-space reading the @data_head value should issue an rmb(), on
* SMP capable platforms, after reading this value -- see
* perf_event_wakeup().
*
* When the mapping is PROT_WRITE the @data_tail value should be
* written by userspace to reflect the last read data. In this case
* the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
};
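/*
 * Illustrative sketch, not part of this header: the seqlock read protocol
 * documented in the comment above, written out for a self-monitoring
 * user-space task that has mmap()ed its event fd. Assumes x86 with rdpmc
 * permitted from user-space, and <unistd.h> for the read() fallback; the
 * helper names are hypothetical.
 */
static inline unsigned long long example_rdpmc(unsigned int counter)
{
	unsigned int lo, hi;
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return (unsigned long long)hi << 32 | lo;
}
static long long example_self_read(int fd, volatile struct perf_event_mmap_page *pc)
{
	unsigned int seq;
	long long count;
	do {
		seq = pc->lock;
		__sync_synchronize();			/* barrier() */
		if (pc->index) {
			count = example_rdpmc(pc->index - 1);
			count += pc->offset;
		} else {
			/* event not currently loaded on a counter: fall back */
			read(fd, &count, sizeof(count));
			break;
		}
		__sync_synchronize();			/* barrier() */
	} while (pc->lock != seq);
	return count;
}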
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
* perf_event_attr::precise_ip.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
struct perf_event_header {
__u32 type;
__u16 misc;
__u16 size;
};
enum perf_event_type {
/*
* If perf_event_attr.sample_id_all is set then all event types will
* have the sample_type selected fields related to where/when
* (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
* described in PERF_RECORD_SAMPLE below, it will be stashed just after
* the perf_event_header and the fields already present for the existing
* fields, i.e. at the end of the payload. That way a newer perf.data
* file will be supported by older perf tools, with these new optional
* fields being ignored.
*
* The MMAP events record the PROT_EXEC mappings so that we can
* correlate userspace IPs to code. They have the following structure:
*
* struct {
* struct perf_event_header header;
*
* u32 pid, tid;
* u64 addr;
* u64 len;
* u64 pgoff;
* char filename[];
* };
*/
PERF_RECORD_MMAP = 1,
/*
* struct {
* struct perf_event_header header;
* u64 id;
* u64 lost;
* };
*/
PERF_RECORD_LOST = 2,
/*
* struct {
* struct perf_event_header header;
*
* u32 pid, tid;
* char comm[];
* };
*/
PERF_RECORD_COMM = 3,
/*
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* };
*/
PERF_RECORD_EXIT = 4,
/*
* struct {
* struct perf_event_header header;
* u64 time;
* u64 id;
* u64 stream_id;
* };
*/
PERF_RECORD_THROTTLE = 5,
PERF_RECORD_UNTHROTTLE = 6,
/*
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* };
*/
PERF_RECORD_FORK = 7,
/*
* struct {
* struct perf_event_header header;
* u32 pid, tid;
*
* struct read_format values;
* };
*/
PERF_RECORD_READ = 8,
/*
* struct {
* struct perf_event_header header;
*
* { u64 ip; } && PERF_SAMPLE_IP
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
* { u64 id; } && PERF_SAMPLE_ID
* { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 period; } && PERF_SAMPLE_PERIOD
*
* { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
*
* #
* # The RAW record below is opaque data wrt the ABI
* #
* # That is, the ABI doesn't make any promises wrt to
* # the stability of its content, it may vary depending
* # on event, hardware, kernel version and phase of
* # the moon.
* #
* # In other words, PERF_SAMPLE_RAW contents are not an ABI.
* #
*
* { u32 size;
* char data[size];}&& PERF_SAMPLE_RAW
* };
*/
PERF_RECORD_SAMPLE = 9,
PERF_RECORD_MAX, /* non-ABI */
};
enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
PERF_CONTEXT_KERNEL = (__u64)-128,
PERF_CONTEXT_USER = (__u64)-512,
PERF_CONTEXT_GUEST = (__u64)-2048,
PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
PERF_CONTEXT_GUEST_USER = (__u64)-2560,
PERF_CONTEXT_MAX = (__u64)-4095,
};
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
*/
#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
int (*is_in_guest)(void);
int (*is_user_mode)(void);
unsigned long (*get_guest_ip)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <asm/atomic.h>
#include <asm/local.h>
#define PERF_MAX_STACK_DEPTH 255
struct perf_callchain_entry {
__u64 nr;
__u64 ip[PERF_MAX_STACK_DEPTH];
};
struct perf_raw_record {
u32 size;
void *data;
};
struct perf_branch_entry {
__u64 from;
__u64 to;
__u64 flags;
};
struct perf_branch_stack {
__u64 nr;
struct perf_branch_entry entries[0];
};
struct task_struct;
/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
union {
struct { /* hardware */
u64 config;
u64 last_tag;
unsigned long config_base;
unsigned long event_base;
int idx;
int last_cpu;
unsigned int extra_reg;
u64 extra_config;
int extra_alloc;
};
struct { /* software */
struct hrtimer hrtimer;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
struct arch_hw_breakpoint info;
struct list_head bp_list;
/*
* Crufty hack to avoid the chicken and egg
* problem hw_breakpoint has with context
* creation and event initialization.
*/
struct task_struct *bp_target;
};
#endif
};
int state;
local64_t prev_count;
u64 sample_period;
u64 last_period;
local64_t period_left;
u64 interrupts;
u64 freq_time_stamp;
u64 freq_count_stamp;
#endif
};
/*
* hw_perf_event::state flags
*/
#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
#define PERF_HES_ARCH 0x04
struct perf_event;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
#define PERF_EVENT_TXN 0x1
/**
* struct pmu - generic performance monitoring unit
*/
struct pmu {
struct list_head entry;
struct device *dev;
char *name;
int type;
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
int task_ctx_nr;
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
*/
void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); /* optional */
/*
* Try and initialize the event for this PMU.
* Should return -ENOENT when the @event doesn't match this PMU.
*/
int (*event_init) (struct perf_event *event);
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
/*
* Adds/Removes a counter to/from the PMU, can be done inside
* a transaction, see the ->*_txn() methods.
*/
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);
/*
* Starts/Stops a counter present on the PMU. The PMI handler
* should stop the counter when perf_event_overflow() returns
* !0. ->start() will be used to continue.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
/*
* Updates the counter value of the event.
*/
void (*read) (struct perf_event *event);
/*
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
*
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*/
void (*start_txn) (struct pmu *pmu); /* optional */
/*
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
int (*commit_txn) (struct pmu *pmu); /* optional */
/*
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
*/
void (*cancel_txn) (struct pmu *pmu); /* optional */
};
/**
* enum perf_event_active_state - the states of an event
*/
enum perf_event_active_state {
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
};
struct file;
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
struct perf_sample_data *,
struct pt_regs *regs);
enum perf_group_flag {
PERF_GROUP_SOFTWARE = 0x1,
};
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
struct swevent_hlist {
struct hlist_head heads[SWEVENT_HLIST_SIZE];
struct rcu_head rcu_head;
};
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info *info; /* timing info, one per cpu */
};
#endif
struct ring_buffer;
/**
* struct perf_event - performance event kernel representation:
*/
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
struct list_head group_entry;
struct list_head event_entry;
struct list_head sibling_list;
struct hlist_node hlist_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
/*
* These are the total time in nanoseconds that the event
* has been enabled (i.e. eligible to run, and the task has
* been scheduled in, if this is a per-task event)
* and running (scheduled onto the CPU), respectively.
*
* They are computed from tstamp_enabled, tstamp_running and
* tstamp_stopped when the event is in INACTIVE or ACTIVE state.
*/
u64 total_time_enabled;
u64 total_time_running;
/*
* These are timestamps used for computing total_time_enabled
* and total_time_running when the event is in INACTIVE or
* ACTIVE state, measured in nanoseconds from an arbitrary point
* in time.
* tstamp_enabled: the notional time when the event was enabled
* tstamp_running: the notional time when the event was scheduled on
* tstamp_stopped: in INACTIVE state, the notional time when the
* event was scheduled off.
*/
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
/*
* timestamp shadows the actual context timing but it can
* be safely used in NMI interrupt context. It reflects the
* context time as it was when the event was last scheduled in.
*
* ctx_time already accounts for ctx->timestamp. Therefore to
* compute ctx_time for a sample, simply add perf_clock().
*/
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
struct file *filp;
/*
* These accumulate total time (in nanoseconds) that children
* events have been enabled and running, respectively.
*/
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
/*
* Protect attach/detach and child_list:
*/
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
/* poll related */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
struct pid_namespace *ns;
u64 id;
perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call *tp_event;
struct event_filter *filter;
#endif
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp; /* cgroup event is attach to */
int cgrp_defer_enabled;
#endif
#endif /* CONFIG_PERF_EVENTS */
};
enum perf_event_context_type {
task_context,
cpu_context,
};
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
struct pmu *pmu;
enum perf_event_context_type type;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
*/
raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
* the list you need to lock both the mutex and the spinlock.
*/
struct mutex mutex;
struct list_head pinned_groups;
struct list_head flexible_groups;
struct list_head event_list;
int nr_events;
int nr_active;
int is_active;
int nr_stat;
int rotate_disable;
atomic_t refcount;
struct task_struct *task;
/*
* Context clock, runs when context enabled.
*/
u64 time;
u64 timestamp;
/*
* These fields let us detect when two contexts have both
* been cloned (inherited) from a common ancestor.
*/
struct perf_event_context *parent_ctx;
u64 parent_gen;
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup events present */
struct rcu_head rcu_head;
};
/*
* Number of contexts where an event can trigger:
* task, softirq, hardirq, nmi.
*/
#define PERF_NR_CONTEXTS 4
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
struct list_head rotation_list;
int jiffies_interval;
struct pmu *active_pmu;
struct perf_cgroup *cgrp;
};
struct perf_output_handle {
struct perf_event *event;
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
void *addr;
int page;
int nmi;
int sample;
};
#ifdef CONFIG_PERF_EVENTS
extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
struct task_struct *task,
perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
struct perf_sample_data {
u64 type;
u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 addr;
u64 id;
u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
u64 period;
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
};
static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
data->addr = addr;
data->raw = NULL;
}
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
extern int perf_event_overflow(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs);
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
}
/*
* Return 1 for a software event, 0 for a hardware event
*/
static inline int is_software_event(struct perf_event *event)
{
return event->pmu->task_ctx_nr == perf_sw_context;
}
extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif
/*
* Take a snapshot of the regs. Skip ip and frame pointer to
* the nth caller. We only need a few of the regs:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
* - bp for callchains
* - eflags, for future purposes, just in case
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(*regs));
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
struct pt_regs hot_regs;
if (static_branch(&perf_swevent_enabled[event_id])) {
if (!regs) {
perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
}
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
}
extern struct jump_label_key perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
if (static_branch(&perf_sched_events))
__perf_event_task_sched_in(task);
}
static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
__perf_event_task_sched_out(task, next);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
}
static inline bool perf_paranoid_cpu(void)
{
return sysctl_perf_event_paranoid > 0;
}
static inline bool perf_paranoid_kernel(void)
{
return sysctl_perf_event_paranoid > 1;
}
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
* This has to have a higher priority than migration_notifier in sched.c.
*/
#define perf_cpu_notifier(fn) \
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
(void *)(unsigned long)smp_processor_id()); \
register_cpu_notifier(&fn##_nb); \
} while (0)
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */
| /*
* Performance events:
*
* Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
*
* Data type definitions, declarations, prototypes.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H
#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>
/*
* User-space ABI bits:
*/
/*
* attr.type
*/
enum perf_type_id {
PERF_TYPE_HARDWARE = 0,
PERF_TYPE_SOFTWARE = 1,
PERF_TYPE_TRACEPOINT = 2,
PERF_TYPE_HW_CACHE = 3,
PERF_TYPE_RAW = 4,
PERF_TYPE_BREAKPOINT = 5,
PERF_TYPE_MAX, /* non-ABI */
};
/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
*/
enum perf_hw_id {
/*
* Common hardware events, generalized by the kernel:
*/
PERF_COUNT_HW_CPU_CYCLES = 0,
PERF_COUNT_HW_INSTRUCTIONS = 1,
PERF_COUNT_HW_CACHE_REFERENCES = 2,
PERF_COUNT_HW_CACHE_MISSES = 3,
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
PERF_COUNT_HW_MAX, /* non-ABI */
};
/*
* Generalized hardware cache events:
*
* { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
* { read, write, prefetch } x
* { accesses, misses }
*/
enum perf_hw_cache_id {
PERF_COUNT_HW_CACHE_L1D = 0,
PERF_COUNT_HW_CACHE_L1I = 1,
PERF_COUNT_HW_CACHE_LL = 2,
PERF_COUNT_HW_CACHE_DTLB = 3,
PERF_COUNT_HW_CACHE_ITLB = 4,
PERF_COUNT_HW_CACHE_BPU = 5,
PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};
enum perf_hw_cache_op_id {
PERF_COUNT_HW_CACHE_OP_READ = 0,
PERF_COUNT_HW_CACHE_OP_WRITE = 1,
PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};
enum perf_hw_cache_op_result_id {
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};
/*
* Special "software" events provided by the kernel, even if the hardware
* does not support performance events. These events measure various
* physical and sw events of the kernel (and allow the profiling of them as
* well):
*/
enum perf_sw_ids {
PERF_COUNT_SW_CPU_CLOCK = 0,
PERF_COUNT_SW_TASK_CLOCK = 1,
PERF_COUNT_SW_PAGE_FAULTS = 2,
PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
PERF_COUNT_SW_CPU_MIGRATIONS = 4,
PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_MAX, /* non-ABI */
};
/*
* Bits that can be set in attr.sample_type to request information
* in the overflow packets.
*/
enum perf_event_sample_format {
PERF_SAMPLE_IP = 1U << 0,
PERF_SAMPLE_TID = 1U << 1,
PERF_SAMPLE_TIME = 1U << 2,
PERF_SAMPLE_ADDR = 1U << 3,
PERF_SAMPLE_READ = 1U << 4,
PERF_SAMPLE_CALLCHAIN = 1U << 5,
PERF_SAMPLE_ID = 1U << 6,
PERF_SAMPLE_CPU = 1U << 7,
PERF_SAMPLE_PERIOD = 1U << 8,
PERF_SAMPLE_STREAM_ID = 1U << 9,
PERF_SAMPLE_RAW = 1U << 10,
PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
};
/*
* The format of the data returned by read() on a perf event fd,
* as specified by attr.read_format:
*
* struct read_format {
* { u64 value;
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
*/
enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
/*
* Hardware event_id to monitor via a performance monitoring event:
*/
struct perf_event_attr {
/*
* Major type: hardware/software/tracepoint/etc.
*/
__u32 type;
/*
* Size of the attr structure, for fwd/bwd compat.
*/
__u32 size;
/*
* Type specific configuration information.
*/
__u64 config;
union {
__u64 sample_period;
__u64 sample_freq;
};
__u64 sample_type;
__u64 read_format;
__u64 disabled : 1, /* off by default */
inherit : 1, /* children inherit it */
pinned : 1, /* must always be on PMU */
exclusive : 1, /* only group on PMU */
exclude_user : 1, /* don't count user */
exclude_kernel : 1, /* ditto kernel */
exclude_hv : 1, /* ditto hypervisor */
exclude_idle : 1, /* don't count when idle */
mmap : 1, /* include mmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
inherit_stat : 1, /* per task counts */
enable_on_exec : 1, /* next exec enables */
task : 1, /* trace fork/exit */
watermark : 1, /* wakeup_watermark */
/*
* precise_ip:
*
* 0 - SAMPLE_IP can have arbitrary skid
* 1 - SAMPLE_IP must have constant skid
* 2 - SAMPLE_IP requested to have 0 skid
* 3 - SAMPLE_IP must have 0 skid
*
* See also PERF_RECORD_MISC_EXACT_IP
*/
precise_ip : 2, /* skid constraint */
mmap_data : 1, /* non-exec mmap data */
sample_id_all : 1, /* sample_type all events */
__reserved_1 : 45;
union {
__u32 wakeup_events; /* wakeup every n events */
__u32 wakeup_watermark; /* bytes before wakeup */
};
__u32 bp_type;
union {
__u64 bp_addr;
__u64 config1; /* extension of config */
};
union {
__u64 bp_len;
__u64 config2; /* extension of config1 */
};
};
/*
* Ioctls that can be done on a perf event fd:
*/
#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
#define PERF_EVENT_IOC_RESET _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
};
/*
* Structure of the page that can be mapped via mmap
*/
struct perf_event_mmap_page {
__u32 version; /* version number of this structure */
__u32 compat_version; /* lowest version this is compat with */
/*
* Bits needed to read the hw events in user-space.
*
* u32 seq;
* s64 count;
*
* do {
* seq = pc->lock;
*
* barrier()
* if (pc->index) {
* count = pmc_read(pc->index - 1);
* count += pc->offset;
* } else
* goto regular_read;
*
* barrier();
* } while (pc->lock != seq);
*
* NOTE: for obvious reasons this only works on self-monitoring
* processes.
*/
__u32 lock; /* seqlock for synchronization */
__u32 index; /* hardware event identifier */
__s64 offset; /* add to hardware event value */
__u64 time_enabled; /* time event active */
__u64 time_running; /* time event on cpu */
/*
* Hole for extension of the self monitor capabilities
*/
__u64 __reserved[123]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
*
* User-space reading the @data_head value should issue an rmb(), on
* SMP capable platforms, after reading this value -- see
* perf_event_wakeup().
*
* When the mapping is PROT_WRITE the @data_tail value should be
* written by userspace to reflect the last read data. In this case
* the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
__u64 data_tail; /* user-space written tail */
};
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
#define PERF_RECORD_MISC_USER (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
/*
* Indicates that the content of PERF_SAMPLE_IP points to
* the actual instruction that triggered the event. See also
* perf_event_attr::precise_ip.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
struct perf_event_header {
__u32 type;
__u16 misc;
__u16 size;
};
enum perf_event_type {
/*
* If perf_event_attr.sample_id_all is set then all event types will
* have the sample_type selected fields related to where/when
* (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
* described in PERF_RECORD_SAMPLE below, it will be stashed just after
* the perf_event_header and the fields already present for the existing
* fields, i.e. at the end of the payload. That way a newer perf.data
* file will be supported by older perf tools, with these new optional
* fields being ignored.
*
* The MMAP events record the PROT_EXEC mappings so that we can
* correlate userspace IPs to code. They have the following structure:
*
* struct {
* struct perf_event_header header;
*
* u32 pid, tid;
* u64 addr;
* u64 len;
* u64 pgoff;
* char filename[];
* };
*/
PERF_RECORD_MMAP = 1,
/*
* struct {
* struct perf_event_header header;
* u64 id;
* u64 lost;
* };
*/
PERF_RECORD_LOST = 2,
/*
* struct {
* struct perf_event_header header;
*
* u32 pid, tid;
* char comm[];
* };
*/
PERF_RECORD_COMM = 3,
/*
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* };
*/
PERF_RECORD_EXIT = 4,
/*
* struct {
* struct perf_event_header header;
* u64 time;
* u64 id;
* u64 stream_id;
* };
*/
PERF_RECORD_THROTTLE = 5,
PERF_RECORD_UNTHROTTLE = 6,
/*
* struct {
* struct perf_event_header header;
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
* };
*/
PERF_RECORD_FORK = 7,
/*
* struct {
* struct perf_event_header header;
* u32 pid, tid;
*
* struct read_format values;
* };
*/
PERF_RECORD_READ = 8,
/*
* struct {
* struct perf_event_header header;
*
* { u64 ip; } && PERF_SAMPLE_IP
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
* { u64 id; } && PERF_SAMPLE_ID
* { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 period; } && PERF_SAMPLE_PERIOD
*
* { struct read_format values; } && PERF_SAMPLE_READ
*
* { u64 nr,
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
*
* #
* # The RAW record below is opaque data wrt the ABI
* #
* # That is, the ABI doesn't make any promises wrt to
* # the stability of its content, it may vary depending
* # on event, hardware, kernel version and phase of
* # the moon.
* #
* # In other words, PERF_SAMPLE_RAW contents are not an ABI.
* #
*
* { u32 size;
* char data[size];}&& PERF_SAMPLE_RAW
* };
*/
PERF_RECORD_SAMPLE = 9,
PERF_RECORD_MAX, /* non-ABI */
};
enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
PERF_CONTEXT_KERNEL = (__u64)-128,
PERF_CONTEXT_USER = (__u64)-512,
PERF_CONTEXT_GUEST = (__u64)-2048,
PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
PERF_CONTEXT_GUEST_USER = (__u64)-2560,
PERF_CONTEXT_MAX = (__u64)-4095,
};
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
*/
#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
struct perf_guest_info_callbacks {
int (*is_in_guest)(void);
int (*is_user_mode)(void);
unsigned long (*get_guest_ip)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <asm/atomic.h>
#include <asm/local.h>
#define PERF_MAX_STACK_DEPTH 255
struct perf_callchain_entry {
__u64 nr;
__u64 ip[PERF_MAX_STACK_DEPTH];
};
struct perf_raw_record {
u32 size;
void *data;
};
struct perf_branch_entry {
__u64 from;
__u64 to;
__u64 flags;
};
struct perf_branch_stack {
__u64 nr;
struct perf_branch_entry entries[0];
};
struct task_struct;
/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
union {
struct { /* hardware */
u64 config;
u64 last_tag;
unsigned long config_base;
unsigned long event_base;
int idx;
int last_cpu;
unsigned int extra_reg;
u64 extra_config;
int extra_alloc;
};
struct { /* software */
struct hrtimer hrtimer;
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
struct arch_hw_breakpoint info;
struct list_head bp_list;
/*
* Crufty hack to avoid the chicken and egg
* problem hw_breakpoint has with context
* creation and event initialization.
*/
struct task_struct *bp_target;
};
#endif
};
int state;
local64_t prev_count;
u64 sample_period;
u64 last_period;
local64_t period_left;
u64 interrupts;
u64 freq_time_stamp;
u64 freq_count_stamp;
#endif
};
/*
* hw_perf_event::state flags
*/
#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
#define PERF_HES_ARCH 0x04
struct perf_event;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
#define PERF_EVENT_TXN 0x1
/**
* struct pmu - generic performance monitoring unit
*/
struct pmu {
struct list_head entry;
struct device *dev;
char *name;
int type;
int * __percpu pmu_disable_count;
struct perf_cpu_context * __percpu pmu_cpu_context;
int task_ctx_nr;
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
*/
void (*pmu_enable) (struct pmu *pmu); /* optional */
void (*pmu_disable) (struct pmu *pmu); /* optional */
/*
* Try and initialize the event for this PMU.
* Should return -ENOENT when the @event doesn't match this PMU.
*/
int (*event_init) (struct perf_event *event);
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
/*
* Adds/Removes a counter to/from the PMU, can be done inside
* a transaction, see the ->*_txn() methods.
*/
int (*add) (struct perf_event *event, int flags);
void (*del) (struct perf_event *event, int flags);
/*
* Starts/Stops a counter present on the PMU. The PMI handler
* should stop the counter when perf_event_overflow() returns
* !0. ->start() will be used to continue.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
/*
* Updates the counter value of the event.
*/
void (*read) (struct perf_event *event);
/*
* Group events scheduling is treated as a transaction, add
* group events as a whole and perform one schedulability test.
* If the test fails, roll back the whole group
*
* Start the transaction, after this ->add() doesn't need to
* do schedulability tests.
*/
void (*start_txn) (struct pmu *pmu); /* optional */
/*
* If ->start_txn() disabled the ->add() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
int (*commit_txn) (struct pmu *pmu); /* optional */
/*
* Will cancel the transaction, assumes ->del() is called
* for each successful ->add() during the transaction.
*/
void (*cancel_txn) (struct pmu *pmu); /* optional */
};
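/*
 * Illustrative sketch, not part of this header: the minimal set of
 * callbacks a hypothetical driver could wire into the structure above.
 * All names here are made up; a real pmu programs its counters in
 * ->start()/->stop() and folds the hardware delta into event->count in
 * ->read() (the software pmus in kernel/events/core.c are complete
 * in-tree examples). perf_sw_context is the same context id used by
 * is_software_event() further down.
 */
static struct pmu example_pmu;
static void example_pmu_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;				/* counting again */
}
static void example_pmu_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
static void example_pmu_read(struct perf_event *event)
{
	/* a real driver would local64_add() the hardware delta to event->count */
}
static int example_pmu_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		example_pmu_start(event, PERF_EF_RELOAD);
	return 0;
}
static void example_pmu_del(struct perf_event *event, int flags)
{
	example_pmu_stop(event, PERF_EF_UPDATE);
}
static int example_pmu_event_init(struct perf_event *event)
{
	if (event->attr.type != example_pmu.type)
		return -ENOENT;		/* not ours; let another pmu claim it */
	return 0;
}
static struct pmu example_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= example_pmu_event_init,
	.add		= example_pmu_add,
	.del		= example_pmu_del,
	.start		= example_pmu_start,
	.stop		= example_pmu_stop,
	.read		= example_pmu_read,
};
/* registered e.g. from an initcall: perf_pmu_register(&example_pmu, "example", -1); */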
/**
* enum perf_event_active_state - the states of an event
*/
enum perf_event_active_state {
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
};
struct file;
struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *,
struct pt_regs *regs);
enum perf_group_flag {
PERF_GROUP_SOFTWARE = 0x1,
};
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
struct swevent_hlist {
struct hlist_head heads[SWEVENT_HLIST_SIZE];
struct rcu_head rcu_head;
};
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info *info; /* timing info, one per cpu */
};
#endif
struct ring_buffer;
/**
* struct perf_event - performance event kernel representation:
*/
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
struct list_head group_entry;
struct list_head event_entry;
struct list_head sibling_list;
struct hlist_node hlist_entry;
int nr_siblings;
int group_flags;
struct perf_event *group_leader;
struct pmu *pmu;
enum perf_event_active_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
/*
* These are the total time in nanoseconds that the event
* has been enabled (i.e. eligible to run, and the task has
* been scheduled in, if this is a per-task event)
* and running (scheduled onto the CPU), respectively.
*
* They are computed from tstamp_enabled, tstamp_running and
* tstamp_stopped when the event is in INACTIVE or ACTIVE state.
*/
u64 total_time_enabled;
u64 total_time_running;
/*
* These are timestamps used for computing total_time_enabled
* and total_time_running when the event is in INACTIVE or
* ACTIVE state, measured in nanoseconds from an arbitrary point
* in time.
* tstamp_enabled: the notional time when the event was enabled
* tstamp_running: the notional time when the event was scheduled on
* tstamp_stopped: in INACTIVE state, the notional time when the
* event was scheduled off.
*/
u64 tstamp_enabled;
u64 tstamp_running;
u64 tstamp_stopped;
/*
* timestamp shadows the actual context timing but it can
* be safely used in NMI interrupt context. It reflects the
* context time as it was when the event was last scheduled in.
*
* ctx_time already accounts for ctx->timestamp. Therefore to
* compute ctx_time for a sample, simply add perf_clock().
*/
u64 shadow_ctx_time;
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
u16 read_size;
struct hw_perf_event hw;
struct perf_event_context *ctx;
struct file *filp;
/*
* These accumulate total time (in nanoseconds) that children
* events have been enabled and running, respectively.
*/
atomic64_t child_total_time_enabled;
atomic64_t child_total_time_running;
/*
* Protect attach/detach and child_list:
*/
struct mutex child_mutex;
struct list_head child_list;
struct perf_event *parent;
int oncpu;
int cpu;
struct list_head owner_entry;
struct task_struct *owner;
/* mmap bits */
struct mutex mmap_mutex;
atomic_t mmap_count;
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
/* poll related */
wait_queue_head_t waitq;
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
int pending_wakeup;
int pending_kill;
int pending_disable;
struct irq_work pending;
atomic_t event_limit;
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
struct pid_namespace *ns;
u64 id;
perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call *tp_event;
struct event_filter *filter;
#endif
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp; /* cgroup event is attach to */
int cgrp_defer_enabled;
#endif
#endif /* CONFIG_PERF_EVENTS */
};
enum perf_event_context_type {
task_context,
cpu_context,
};
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
struct pmu *pmu;
enum perf_event_context_type type;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
*/
raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
* the list you need to lock both the mutex and the spinlock.
*/
struct mutex mutex;
struct list_head pinned_groups;
struct list_head flexible_groups;
struct list_head event_list;
int nr_events;
int nr_active;
int is_active;
int nr_stat;
int rotate_disable;
atomic_t refcount;
struct task_struct *task;
/*
* Context clock, runs when context enabled.
*/
u64 time;
u64 timestamp;
/*
* These fields let us detect when two contexts have both
* been cloned (inherited) from a common ancestor.
*/
struct perf_event_context *parent_ctx;
u64 parent_gen;
u64 generation;
int pin_count;
int nr_cgroups; /* cgroup events present */
struct rcu_head rcu_head;
};
/*
* Number of contexts where an event can trigger:
* task, softirq, hardirq, nmi.
*/
#define PERF_NR_CONTEXTS 4
/**
* struct perf_event_cpu_context - per cpu event context structure
*/
struct perf_cpu_context {
struct perf_event_context ctx;
struct perf_event_context *task_ctx;
int active_oncpu;
int exclusive;
struct list_head rotation_list;
int jiffies_interval;
struct pmu *active_pmu;
struct perf_cgroup *cgrp;
};
struct perf_output_handle {
struct perf_event *event;
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
void *addr;
int page;
int sample;
};
#ifdef CONFIG_PERF_EVENTS
extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
struct task_struct *task,
perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
struct perf_sample_data {
u64 type;
u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 addr;
u64 id;
u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
u64 period;
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
};
static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
data->addr = addr;
data->raw = NULL;
}
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
static inline bool is_sampling_event(struct perf_event *event)
{
return event->attr.sample_period != 0;
}
/*
* Return 1 for a software event, 0 for a hardware event
*/
static inline int is_software_event(struct perf_event *event)
{
return event->pmu->task_ctx_nr == perf_sw_context;
}
extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif
/*
* Take a snapshot of the regs. Skip ip and frame pointer to
* the nth caller. We only need a few of the regs:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
* - bp for callchains
* - eflags, for future purposes, just in case
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(*regs));
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct pt_regs hot_regs;
if (static_branch(&perf_swevent_enabled[event_id])) {
if (!regs) {
perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
}
__perf_sw_event(event_id, nr, regs, addr);
}
}
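/*
 * Usage note, not part of this header: compared with the older variant of
 * this API shown above, the nmi argument is gone, so a call site passes
 * only the event id, a count, regs and an address. The x86 fault handler
 * earlier in this dump does exactly that:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The jump label test in perf_sw_event() keeps such call sites cheap while
 * no software events are enabled.
 */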
extern struct jump_label_key perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
if (static_branch(&perf_sched_events))
__perf_event_task_sched_in(task);
}
static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
__perf_event_task_sched_out(task, next);
}
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
static inline bool perf_paranoid_tracepoint_raw(void)
{
return sysctl_perf_event_paranoid > -1;
}
static inline bool perf_paranoid_cpu(void)
{
return sysctl_perf_event_paranoid > 0;
}
static inline bool perf_paranoid_kernel(void)
{
return sysctl_perf_event_paranoid > 1;
}
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline void perf_event_task_tick(void) { }
#endif
#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
/*
* This has to have a higher priority than migration_notifier in sched.c.
*/
#define perf_cpu_notifier(fn) \
do { \
static struct notifier_block fn##_nb __cpuinitdata = \
{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_STARTING, \
(void *)(unsigned long)smp_processor_id()); \
fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
(void *)(unsigned long)smp_processor_id()); \
register_cpu_notifier(&fn##_nb); \
} while (0)
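/*
 * Illustrative use, not part of this header: a hypothetical hotplug
 * callback registered through the macro above. perf_cpu_notifier() replays
 * CPU_UP_PREPARE, CPU_STARTING and CPU_ONLINE for the current CPU before
 * registering the notifier block, so the callback must cope with that.
 */
static int __cpuinit example_cpu_notify(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state for 'cpu' */
		break;
	case CPU_ONLINE:
		/* start using it */
		break;
	}
	return NOTIFY_OK;
}
/* typically invoked from an initcall: perf_cpu_notifier(example_cpu_notify); */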
#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */
|