| idx int64 | func_before string | Vulnerability Classification string | vul int64 | func_after string | patch string | CWE ID string | lines_before string | lines_after string |
|---|---|---|---|---|---|---|---|---|
17,100 | static unsigned long read_swap_header(struct swap_info_struct *p,
union swap_header *swap_header,
struct inode *inode)
{
int i;
unsigned long maxpages;
unsigned long swapfilepages;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
printk(KERN_ERR "Unable to find swap-space signature\n");
return 0;
}
/* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
printk(KERN_WARNING
"Unable to handle swap header version %d\n",
swap_header->info.version);
return 0;
}
p->lowest_bit = 1;
p->cluster_next = 1;
p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
* device. There are three limiting factors: 1) the number
* of bits for the swap offset in the swp_entry_t type, and
* 2) the number of bits in the swap pte as defined by
* the different architectures, and 3) the number of free bits
* in an exceptional radix_tree entry. In order to find the
* largest possible bit mask, a swap entry with swap type 0
* and swap offset ~0UL is created, encoded to a swap pte,
* decoded to a swp_entry_t again, and finally the swap
* offset is extracted. This will mask all the bits from
* the initial ~0UL mask that can't be encoded in either
* the swp_entry_t or the architecture definition of a
* swap pte. Then the same is done for a radix_tree entry.
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL))));
maxpages = swp_offset(radix_to_swp_entry(
swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
if (maxpages > swap_header->info.last_page) {
maxpages = swap_header->info.last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
p->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
printk(KERN_WARNING
"Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
return 0;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
return maxpages;
}
| DoS | 0 | static unsigned long read_swap_header(struct swap_info_struct *p,
union swap_header *swap_header,
struct inode *inode)
{
int i;
unsigned long maxpages;
unsigned long swapfilepages;
if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
printk(KERN_ERR "Unable to find swap-space signature\n");
return 0;
}
/* swap partition endianness hack... */
if (swab32(swap_header->info.version) == 1) {
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
/* Check the swap header's sub-version */
if (swap_header->info.version != 1) {
printk(KERN_WARNING
"Unable to handle swap header version %d\n",
swap_header->info.version);
return 0;
}
p->lowest_bit = 1;
p->cluster_next = 1;
p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
* device. There are three limiting factors: 1) the number
* of bits for the swap offset in the swp_entry_t type, and
* 2) the number of bits in the swap pte as defined by
* the different architectures, and 3) the number of free bits
* in an exceptional radix_tree entry. In order to find the
* largest possible bit mask, a swap entry with swap type 0
* and swap offset ~0UL is created, encoded to a swap pte,
* decoded to a swp_entry_t again, and finally the swap
* offset is extracted. This will mask all the bits from
* the initial ~0UL mask that can't be encoded in either
* the swp_entry_t or the architecture definition of a
* swap pte. Then the same is done for a radix_tree entry.
*/
maxpages = swp_offset(pte_to_swp_entry(
swp_entry_to_pte(swp_entry(0, ~0UL))));
maxpages = swp_offset(radix_to_swp_entry(
swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
if (maxpages > swap_header->info.last_page) {
maxpages = swap_header->info.last_page + 1;
/* p->max is an unsigned int: don't overflow it */
if ((unsigned int)maxpages == 0)
maxpages = UINT_MAX;
}
p->highest_bit = maxpages - 1;
if (!maxpages)
return 0;
swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
if (swapfilepages && maxpages > swapfilepages) {
printk(KERN_WARNING
"Swap area shorter than signature indicates\n");
return 0;
}
if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
return 0;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
return 0;
return maxpages;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
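Note on the recurring patch in this batch: every row above and below carries the same lines_after diff (the CWE-264 fix for the swapoff/THP race, CVE-2012-1179). The old unuse_pmd_range() read *pmd twice, once for pmd_trans_huge() and once inside pmd_none_or_clear_bad(); with mmap_sem held only for reading, another thread could split the huge pmd between the two reads, so pmd_none_or_clear_bad() could observe a transient state and trip its pmd_bad() check, oopsing the kernel. The fix folds everything into one helper that reads the pmd a single time. A minimal sketch of that helper, assuming the asm-generic shape (not quoted from this dataset):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* read the pmd value exactly once */

	barrier();		/* keep the compiler from re-reading *pmd below */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;	/* nothing to scan here; the caller skips the range */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}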
17,101 | int reuse_swap_page(struct page *page)
{
int count;
VM_BUG_ON(!PageLocked(page));
if (unlikely(PageKsm(page)))
return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
if (count == 1 && !PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
}
return count <= 1;
}
| DoS | 0 | int reuse_swap_page(struct page *page)
{
int count;
VM_BUG_ON(!PageLocked(page));
if (unlikely(PageKsm(page)))
return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
if (count == 1 && !PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
}
return count <= 1;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
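reuse_swap_page() answers one question: is the caller the sole owner of this anonymous page? It sums page_mapcount() with, for swapcache pages, page_swapcount(), and as a side effect drops a stable, not-under-writeback swapcache copy so the page can be dirtied in place. A hedged sketch of the typical caller pattern (illustrative; do_wp_page() is the known user, but this snippet is not its code):

	/* Write fault on an anonymous page (illustrative caller sketch). */
	if (reuse_swap_page(page)) {
		/* sole owner: make the existing page writable in place */
	} else {
		/* still referenced via other ptes or swap: copy-on-write */
	}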
17,102 | static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct inode *inode;
unsigned blocks_per_page;
unsigned long page_no;
unsigned blkbits;
sector_t probe_block;
sector_t last_block;
sector_t lowest_block = -1;
sector_t highest_block = 0;
int nr_extents = 0;
int ret;
inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
goto out;
}
blkbits = inode->i_blkbits;
blocks_per_page = PAGE_SIZE >> blkbits;
/*
* Map all the blocks into the extent list. This code doesn't try
* to be very smart.
*/
probe_block = 0;
page_no = 0;
last_block = i_size_read(inode) >> blkbits;
while ((probe_block + blocks_per_page) <= last_block &&
page_no < sis->max) {
unsigned block_in_page;
sector_t first_block;
first_block = bmap(inode, probe_block);
if (first_block == 0)
goto bad_bmap;
/*
* It must be PAGE_SIZE aligned on-disk
*/
if (first_block & (blocks_per_page - 1)) {
probe_block++;
goto reprobe;
}
for (block_in_page = 1; block_in_page < blocks_per_page;
block_in_page++) {
sector_t block;
block = bmap(inode, probe_block + block_in_page);
if (block == 0)
goto bad_bmap;
if (block != first_block + block_in_page) {
/* Discontiguity */
probe_block++;
goto reprobe;
}
}
first_block >>= (PAGE_SHIFT - blkbits);
if (page_no) { /* exclude the header page */
if (first_block < lowest_block)
lowest_block = first_block;
if (first_block > highest_block)
highest_block = first_block;
}
/*
* We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
*/
ret = add_swap_extent(sis, page_no, 1, first_block);
if (ret < 0)
goto out;
nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
ret = nr_extents;
*span = 1 + highest_block - lowest_block;
if (page_no == 0)
page_no = 1; /* force Empty message */
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
out:
return ret;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
goto out;
}
| DoS | 0 | static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct inode *inode;
unsigned blocks_per_page;
unsigned long page_no;
unsigned blkbits;
sector_t probe_block;
sector_t last_block;
sector_t lowest_block = -1;
sector_t highest_block = 0;
int nr_extents = 0;
int ret;
inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
goto out;
}
blkbits = inode->i_blkbits;
blocks_per_page = PAGE_SIZE >> blkbits;
/*
* Map all the blocks into the extent list. This code doesn't try
* to be very smart.
*/
probe_block = 0;
page_no = 0;
last_block = i_size_read(inode) >> blkbits;
while ((probe_block + blocks_per_page) <= last_block &&
page_no < sis->max) {
unsigned block_in_page;
sector_t first_block;
first_block = bmap(inode, probe_block);
if (first_block == 0)
goto bad_bmap;
/*
* It must be PAGE_SIZE aligned on-disk
*/
if (first_block & (blocks_per_page - 1)) {
probe_block++;
goto reprobe;
}
for (block_in_page = 1; block_in_page < blocks_per_page;
block_in_page++) {
sector_t block;
block = bmap(inode, probe_block + block_in_page);
if (block == 0)
goto bad_bmap;
if (block != first_block + block_in_page) {
/* Discontiguity */
probe_block++;
goto reprobe;
}
}
first_block >>= (PAGE_SHIFT - blkbits);
if (page_no) { /* exclude the header page */
if (first_block < lowest_block)
lowest_block = first_block;
if (first_block > highest_block)
highest_block = first_block;
}
/*
* We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
*/
ret = add_swap_extent(sis, page_no, 1, first_block);
if (ret < 0)
goto out;
nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
ret = nr_extents;
*span = 1 + highest_block - lowest_block;
if (page_no == 0)
page_no = 1; /* force Empty message */
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
out:
return ret;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
goto out;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
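The probe loop above turns bmap() results into swap extents one page at a time: a file page is usable only if its blocks_per_page filesystem blocks are contiguous on disk and the run starts PAGE_SIZE-aligned. A worked example under assumed sizes (not taken from the dataset):

/* Assume PAGE_SIZE = 4096 and blkbits = 10 (1 KiB blocks):
 *   blocks_per_page = 4096 >> 10 = 4
 * bmap(inode, 0..3) -> 100, 101, 102, 103:
 *   100 & (4 - 1) == 0 and the run is contiguous, so
 *   first_block >>= (12 - 10) gives page-sized block 25 for page 0.
 * bmap(inode, 4) -> 200 but bmap(inode, 5) -> 300: discontiguous,
 *   so probe_block++ and that candidate is skipped; a hole (bmap == 0)
 *   aborts swapon entirely with "swapfile has holes".
 */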
17,103 | static int setup_swap_map_and_extents(struct swap_info_struct *p,
union swap_header *swap_header,
unsigned char *swap_map,
unsigned long maxpages,
sector_t *span)
{
int i;
unsigned int nr_good_pages;
int nr_extents;
nr_good_pages = maxpages - 1; /* omit header page */
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
return -EINVAL;
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
}
}
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, span);
if (nr_extents < 0)
return nr_extents;
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
printk(KERN_WARNING "Empty swap-file\n");
return -EINVAL;
}
return nr_extents;
}
| DoS | 0 | static int setup_swap_map_and_extents(struct swap_info_struct *p,
union swap_header *swap_header,
unsigned char *swap_map,
unsigned long maxpages,
sector_t *span)
{
int i;
unsigned int nr_good_pages;
int nr_extents;
nr_good_pages = maxpages - 1; /* omit header page */
for (i = 0; i < swap_header->info.nr_badpages; i++) {
unsigned int page_nr = swap_header->info.badpages[i];
if (page_nr == 0 || page_nr > swap_header->info.last_page)
return -EINVAL;
if (page_nr < maxpages) {
swap_map[page_nr] = SWAP_MAP_BAD;
nr_good_pages--;
}
}
if (nr_good_pages) {
swap_map[0] = SWAP_MAP_BAD;
p->max = maxpages;
p->pages = nr_good_pages;
nr_extents = setup_swap_extents(p, span);
if (nr_extents < 0)
return nr_extents;
nr_good_pages = p->pages;
}
if (!nr_good_pages) {
printk(KERN_WARNING "Empty swap-file\n");
return -EINVAL;
}
return nr_extents;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,104 | void si_swapinfo(struct sysinfo *val)
{
unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
| DoS | 0 | void si_swapinfo(struct sysinfo *val)
{
unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,105 | static bool swap_count_continued(struct swap_info_struct *si,
pgoff_t offset, unsigned char count)
{
struct page *head;
struct page *page;
unsigned char *map;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
BUG_ON(count & COUNT_CONTINUED);
return false; /* need to add count continuation */
}
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
map = kmap_atomic(page, KM_USER0) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
/*
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page, KM_USER0) + offset;
}
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
if (page == head)
return false; /* add count continuation */
map = kmap_atomic(page, KM_USER0) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page, KM_USER0) + offset;
*map = COUNT_CONTINUED;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
}
return true; /* incremented */
} else { /* decrementing */
/*
* Think of how you subtract 1 from 1000
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page, KM_USER0) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page, KM_USER0) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
}
return count == COUNT_CONTINUED;
}
}
| DoS | 0 | static bool swap_count_continued(struct swap_info_struct *si,
pgoff_t offset, unsigned char count)
{
struct page *head;
struct page *page;
unsigned char *map;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
BUG_ON(count & COUNT_CONTINUED);
return false; /* need to add count continuation */
}
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
map = kmap_atomic(page, KM_USER0) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
/*
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page, KM_USER0) + offset;
}
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
if (page == head)
return false; /* add count continuation */
map = kmap_atomic(page, KM_USER0) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page, KM_USER0) + offset;
*map = COUNT_CONTINUED;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
}
return true; /* incremented */
} else { /* decrementing */
/*
* Think of how you subtract 1 from 1000
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
map = kmap_atomic(page, KM_USER0) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
map = kmap_atomic(page, KM_USER0) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
kunmap_atomic(map, KM_USER0);
page = list_entry(page->lru.prev, struct page, lru);
}
return count == COUNT_CONTINUED;
}
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
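The continuation list behaves like a multi-digit counter: the swap_map byte holds the low digit and each continuation page holds one higher digit (up to SWAP_CONT_MAX) at the same offset, so incrementing ripples a carry up the list and decrementing borrows back down, exactly the "add 1 to 999" analogy in the comments. A standalone sketch of the same arithmetic over a plain digit array (DIGIT_MAX is an assumed stand-in, purely illustrative):

#define DIGIT_MAX 0x7f	/* assumed stand-in for SWAP_CONT_MAX */

/* Add 1 to a little-endian base-(DIGIT_MAX + 1) counter; mirrors the
 * carry loop above. Returns 0 when another "continuation digit" would
 * be needed, like swap_count_continued() returning false. */
static int counter_increment(unsigned char *digit, int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++) {
		if (digit[i] < DIGIT_MAX) {
			digit[i]++;
			return 1;	/* carried into an existing digit */
		}
		digit[i] = 0;		/* 999 -> 000, keep carrying */
	}
	return 0;			/* overflow: grow the counter */
}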
17,106 | int swap_duplicate(swp_entry_t entry)
{
int err = 0;
while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
| DoS | 0 | int swap_duplicate(swp_entry_t entry)
{
int err = 0;
while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,107 | static unsigned char swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;
count = p->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
has_cache = 0;
} else if (count == SWAP_MAP_SHMEM) {
/*
* Or we could insist on shmem.c using a special
* swap_shmem_free() and free_shmem_swap_and_cache()...
*/
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
if (swap_count_continued(p, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
} else
count--;
}
if (!count)
mem_cgroup_uncharge_swap(entry);
usage = count | has_cache;
p->swap_map[offset] = usage;
/* free if no reference */
if (!usage) {
struct gendisk *disk = p->bdev->bd_disk;
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
if (swap_list.next >= 0 &&
p->prio > swap_info[swap_list.next]->prio)
swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
if ((p->flags & SWP_BLKDEV) &&
disk->fops->swap_slot_free_notify)
disk->fops->swap_slot_free_notify(p->bdev, offset);
}
return usage;
}
| DoS | 0 | static unsigned char swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
unsigned long offset = swp_offset(entry);
unsigned char count;
unsigned char has_cache;
count = p->swap_map[offset];
has_cache = count & SWAP_HAS_CACHE;
count &= ~SWAP_HAS_CACHE;
if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
has_cache = 0;
} else if (count == SWAP_MAP_SHMEM) {
/*
* Or we could insist on shmem.c using a special
* swap_shmem_free() and free_shmem_swap_and_cache()...
*/
count = 0;
} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
if (count == COUNT_CONTINUED) {
if (swap_count_continued(p, offset, count))
count = SWAP_MAP_MAX | COUNT_CONTINUED;
else
count = SWAP_MAP_MAX;
} else
count--;
}
if (!count)
mem_cgroup_uncharge_swap(entry);
usage = count | has_cache;
p->swap_map[offset] = usage;
/* free if no reference */
if (!usage) {
struct gendisk *disk = p->bdev->bd_disk;
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
if (swap_list.next >= 0 &&
p->prio > swap_info[swap_list.next]->prio)
swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
if ((p->flags & SWP_BLKDEV) &&
disk->fops->swap_slot_free_notify)
disk->fops->swap_slot_free_notify(p->bdev, offset);
}
return usage;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
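swap_entry_free() works on a packed swap_map byte: one bit records a swapcache reference and the rest hold the duplication count, with an overflow marker pointing at the continuation pages above. A layout sketch under the usual definitions (values assumed from include/linux/swap.h of this era, not quoted from this row):

/* Assumed swap_map byte layout (sketch):
 *   COUNT_CONTINUED 0x80  count overflows into continuation pages
 *   SWAP_HAS_CACHE  0x40  a swapcache page references this slot
 *   0x00..SWAP_MAP_MAX    the reference count proper
 *   SWAP_MAP_BAD          slot marked unusable
 *   SWAP_MAP_SHMEM        slot owned by shmem/tmpfs
 * So "count &= ~SWAP_HAS_CACHE" above splits the byte into its two
 * independent halves before either one is decremented.
 */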
17,108 | void swap_free(swp_entry_t entry)
{
struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, 1);
spin_unlock(&swap_lock);
}
}
| DoS | 0 | void swap_free(swp_entry_t entry)
{
struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, 1);
spin_unlock(&swap_lock);
}
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,109 | static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
struct swap_info_struct *si = v;
int type;
if (v == SEQ_START_TOKEN)
type = 0;
else
type = si->type + 1;
for (; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
return si;
}
return NULL;
}
| DoS | 0 | static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
struct swap_info_struct *si = v;
int type;
if (v == SEQ_START_TOKEN)
type = 0;
else
type = si->type + 1;
for (; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
return si;
}
return NULL;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,110 | void swap_shmem_alloc(swp_entry_t entry)
{
__swap_duplicate(entry, SWAP_MAP_SHMEM);
}
| DoS | 0 | void swap_shmem_alloc(swp_entry_t entry)
{
__swap_duplicate(entry, SWAP_MAP_SHMEM);
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,111 | static int swap_show(struct seq_file *swap, void *v)
{
struct swap_info_struct *si = v;
struct file *file;
int len;
if (si == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
si->pages << (PAGE_SHIFT - 10),
si->inuse_pages << (PAGE_SHIFT - 10),
si->prio);
return 0;
}
| DoS | 0 | static int swap_show(struct seq_file *swap, void *v)
{
struct swap_info_struct *si = v;
struct file *file;
int len;
if (si == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
si->pages << (PAGE_SHIFT - 10),
si->inuse_pages << (PAGE_SHIFT - 10),
si->prio);
return 0;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,112 | static void *swap_start(struct seq_file *swap, loff_t *pos)
{
struct swap_info_struct *si;
int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
if (!l)
return SEQ_START_TOKEN;
for (type = 0; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
return si;
}
return NULL;
}
| DoS | 0 | static void *swap_start(struct seq_file *swap, loff_t *pos)
{
struct swap_info_struct *si;
int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
if (!l)
return SEQ_START_TOKEN;
for (type = 0; type < nr_swapfiles; type++) {
smp_rmb(); /* read nr_swapfiles before swap_info[type] */
si = swap_info[type];
if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
return si;
}
return NULL;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,113 | static void swap_stop(struct seq_file *swap, void *v)
{
mutex_unlock(&swapon_mutex);
}
| DoS | 0 | static void swap_stop(struct seq_file *swap, void *v)
{
mutex_unlock(&swapon_mutex);
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
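swap_start/swap_next/swap_show/swap_stop are the four halves of the seq_file iterator protocol behind /proc/swaps: start takes swapon_mutex and positions the cursor (returning SEQ_START_TOKEN for the header line), next advances past unused slots, show formats one row, stop drops the mutex. A sketch of how they are plausibly wired together (the struct and open-function names here are assumptions, not quoted from the source):

static const struct seq_operations swaps_op = {
	.start = swap_start,
	.next  = swap_next,
	.show  = swap_show,
	.stop  = swap_stop,
};

static int swaps_open(struct inode *inode, struct file *file)
{
	/* registered under /proc via proc_create() in the real kernel */
	return seq_open(file, &swaps_op);
}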
17,114 | int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
if (!bdev) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
return type;
}
if (bdev == sis->bdev) {
struct swap_extent *se = &sis->first_swap_extent;
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
return type;
}
}
}
spin_unlock(&swap_lock);
if (bdev)
bdput(bdev);
return -ENODEV;
}
| DoS | 0 | int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
if (!bdev) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
return type;
}
if (bdev == sis->bdev) {
struct swap_extent *se = &sis->first_swap_extent;
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
return type;
}
}
}
spin_unlock(&swap_lock);
if (bdev)
bdput(bdev);
return -ENODEV;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,115 | void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
unsigned char count;
p = swap_info_get(entry);
if (p) {
count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
if (page)
mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&swap_lock);
}
}
| DoS | 0 | void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
unsigned char count;
p = swap_info_get(entry);
if (p) {
count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
if (page)
mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&swap_lock);
}
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,116 | int swapcache_prepare(swp_entry_t entry)
{
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
| DoS | 0 | int swapcache_prepare(swp_entry_t entry)
{
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,117 | sector_t swapdev_block(int type, pgoff_t offset)
{
struct block_device *bdev;
if ((unsigned int)type >= nr_swapfiles)
return 0;
if (!(swap_info[type]->flags & SWP_WRITEOK))
return 0;
return map_swap_entry(swp_entry(type, offset), &bdev);
}
| DoS | 0 | sector_t swapdev_block(int type, pgoff_t offset)
{
struct block_device *bdev;
if ((unsigned int)type >= nr_swapfiles)
return 0;
if (!(swap_info[type]->flags & SWP_WRITEOK))
return 0;
return map_swap_entry(swp_entry(type, offset), &bdev);
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,118 | static unsigned swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
poll_wait(file, &proc_poll_wait, wait);
if (seq->poll_event != atomic_read(&proc_poll_event)) {
seq->poll_event = atomic_read(&proc_poll_event);
return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
}
return POLLIN | POLLRDNORM;
}
| DoS | 0 | static unsigned swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
poll_wait(file, &proc_poll_wait, wait);
if (seq->poll_event != atomic_read(&proc_poll_event)) {
seq->poll_event = atomic_read(&proc_poll_event);
return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
}
return POLLIN | POLLRDNORM;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
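swaps_poll() makes /proc/swaps pollable: proc_poll_event is bumped on every swapon/swapoff, and a seq_file whose cached poll_event lags behind it reports POLLERR | POLLPRI so watchers know to re-read the table. A hypothetical userspace watcher (the file path is real; the rest is an illustrative sketch):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Block until the set of active swap areas changes. */
int watch_swaps(void)
{
	struct pollfd pfd = { .events = POLLPRI };

	pfd.fd = open("/proc/swaps", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	poll(&pfd, 1, -1);	/* wakes with POLLERR | POLLPRI on change */
	close(pfd.fd);
	return 0;
}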
17,119 | int try_to_free_swap(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
if (!PageSwapCache(page))
return 0;
if (PageWriteback(page))
return 0;
if (page_swapcount(page))
return 0;
/*
* Once hibernation has begun to create its image of memory,
* there's a danger that one of the calls to try_to_free_swap()
* - most probably a call from __try_to_reclaim_swap() while
* hibernation is allocating its own swap pages for the image,
* but conceivably even a call from memory reclaim - will free
* the swap from a page which has already been recorded in the
* image as a clean swapcache page, and then reuse its swap for
* another page of the image. On waking from hibernation, the
* original page might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
* Hibernation suspends storage while it is writing the image
* to disk, so check that here.
*/
if (pm_suspended_storage())
return 0;
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
}
| DoS | 0 | int try_to_free_swap(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
if (!PageSwapCache(page))
return 0;
if (PageWriteback(page))
return 0;
if (page_swapcount(page))
return 0;
/*
* Once hibernation has begun to create its image of memory,
* there's a danger that one of the calls to try_to_free_swap()
* - most probably a call from __try_to_reclaim_swap() while
* hibernation is allocating its own swap pages for the image,
* but conceivably even a call from memory reclaim - will free
* the swap from a page which has already been recorded in the
* image as a clean swapcache page, and then reuse its swap for
* another page of the image. On waking from hibernation, the
* original page might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
* Hibernation suspends storage while it is writing the image
* to disk, so check that here.
*/
if (pm_suspended_storage())
return 0;
delete_from_swap_cache(page);
SetPageDirty(page);
return 1;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,120 | static int unuse_mm(struct mm_struct *mm,
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
* Activate page so shrink_inactive_list is unlikely to unmap
* its ptes while lock is dropped, so swapoff can make progress.
*/
activate_page(page);
unlock_page(page);
down_read(&mm->mmap_sem);
lock_page(page);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
up_read(&mm->mmap_sem);
return (ret < 0)? ret: 0;
}
| DoS | 0 | static int unuse_mm(struct mm_struct *mm,
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
* Activate page so shrink_inactive_list is unlikely to unmap
* its ptes while lock is dropped, so swapoff can make progress.
*/
activate_page(page);
unlock_page(page);
down_read(&mm->mmap_sem);
lock_page(page);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
up_read(&mm->mmap_sem);
return (ret < 0)? ret: 0;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,121 | static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
/*
* We don't actually need pte lock while scanning for swp_pte: since
* we hold page lock and mmap_sem, swp_pte cannot be inserted into the
* page table while we're scanning; though it could get zapped, and on
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
* preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
goto out;
pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
out:
return ret;
}
| DoS | 0 | static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
/*
* We don't actually need pte lock while scanning for swp_pte: since
* we hold page lock and mmap_sem, swp_pte cannot be inserted into the
* page table while we're scanning; though it could get zapped, and on
* some architectures (e.g. x86_32 with PAE) we might catch a glimpse
* of unmatched parts which look like swp_pte, so unuse_pte must
* recheck under pte lock. Scanning without pte lock lets it be
* preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
*/
pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
goto out;
pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
out:
return ret;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,122 | int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
struct swap_info_struct *si;
int our_page_cluster = page_cluster;
pgoff_t target, toff;
pgoff_t base, end;
int nr_pages = 0;
if (!our_page_cluster) /* no readahead */
return 0;
si = swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
if (!base) /* first page is swap header */
base++;
spin_lock(&swap_lock);
if (end > si->max) /* don't go beyond end of map */
end = si->max;
/* Count contiguous allocated slots above our target */
for (toff = target; ++toff < end; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
/* Count contiguous allocated slots below our target */
for (toff = target; --toff >= base; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
spin_unlock(&swap_lock);
/*
* Indicate starting offset, and return number of pages to get:
* if only 1, say 0, since there's then no readahead to be done.
*/
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
| DoS | 0 | int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
struct swap_info_struct *si;
int our_page_cluster = page_cluster;
pgoff_t target, toff;
pgoff_t base, end;
int nr_pages = 0;
if (!our_page_cluster) /* no readahead */
return 0;
si = swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
if (!base) /* first page is swap header */
base++;
spin_lock(&swap_lock);
if (end > si->max) /* don't go beyond end of map */
end = si->max;
/* Count contiguous allocated slots above our target */
for (toff = target; ++toff < end; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
/* Count contiguous allocated slots below our target */
for (toff = target; --toff >= base; nr_pages++) {
/* Don't read in free or bad pages */
if (!si->swap_map[toff])
break;
if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
break;
}
spin_unlock(&swap_lock);
/*
* Indicate starting offset, and return number of pages to get:
* if only 1, say 0, since there's then no readahead to be done.
*/
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
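valid_swaphandles() sizes swap readahead: it clamps a 2^page_cluster-page window, aligned to the cluster, around the faulting slot, then shrinks it to the contiguous run of in-use, non-bad slots touching the target. A worked example under an assumed page_cluster (not from the dataset):

/* Assume page_cluster = 3, i.e. 8-page clusters, and target = 21:
 *   base = (21 >> 3) << 3 = 16,  end = 16 + (1 << 3) = 24
 * If slots 19..22 are allocated but 18 and 23 are free, the upward
 * scan stops at 23 and the downward scan at 18, so:
 *   *offset = 19 and the return value is 4 pages to read ahead.
 */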
17,123 | static int wait_for_discard(void *word)
{
schedule();
return 0;
}
| DoS | 0 | static int wait_for_discard(void *word)
{
schedule();
return 0;
}
| @@ -932,9 +932,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (unlikely(pmd_trans_huge(*pmd)))
- continue;
- if (pmd_none_or_clear_bad(pmd))
+ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret) | CWE-264 | null | null |
17,124 | static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
enum charge_type ctype)
{
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
__mem_cgroup_cancel_charge(memcg, nr_pages);
return;
}
/*
* we don't need lock_page_cgroup() on tail pages, because they are not
* accessed by any other context at this point.
*/
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
* Especially when a page_cgroup is taken from a page, pc->mem_cgroup
* is accessed after testing the USED bit. To make pc->mem_cgroup visible
* before the USED bit is set, we need a memory barrier here.
* See mem_cgroup_add_lru_list(), etc.
*/
smp_wmb();
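	/*
	 * Reader-side sketch (editorial assumption, pairs with the wmb):
	 *
	 *	if (PageCgroupUsed(pc)) {
	 *		smp_rmb();		// order USED test before load
	 *		memcg = pc->mem_cgroup;	// guaranteed visible by the pair
	 *	}
	 */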
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_CACHE:
case MEM_CGROUP_CHARGE_TYPE_SHMEM:
SetPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
ClearPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
default:
break;
}
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
WARN_ON_ONCE(PageLRU(page));
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
* if they exceeds softlimit.
*/
memcg_check_events(memcg, page);
}
| DoS | 0 | static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
enum charge_type ctype)
{
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
__mem_cgroup_cancel_charge(memcg, nr_pages);
return;
}
/*
* we don't need lock_page_cgroup() on tail pages, because they are not
* accessed by any other context at this point.
*/
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
* Especially when a page_cgroup is taken from a page, pc->mem_cgroup
* is accessed after testing the USED bit. To make pc->mem_cgroup visible
* before the USED bit is set, we need a memory barrier here.
* See mem_cgroup_add_lru_list(), etc.
*/
smp_wmb();
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_CACHE:
case MEM_CGROUP_CHARGE_TYPE_SHMEM:
SetPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
ClearPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
default:
break;
}
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
WARN_ON_ONCE(PageLRU(page));
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
* if they exceeds softlimit.
*/
memcg_check_events(memcg, page);
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
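Note on the patch carried by this and the following rows: mem_cgroup_usage_unregister_event() assumed at least one threshold had been registered, so if userspace took the unregister path on a memcg with no thresholds, thresholds->primary was NULL and the subsequent walk dereferenced it, a local denial of service. The fix bails out to the unlock label instead. The shape of the fix, as a sketch:

	/* Treat "nothing registered" as a no-op instead of a NULL deref. */
	mutex_lock(&memcg->thresholds_lock);
	if (!thresholds->primary)
		goto unlock;	/* nothing to remove */
	/* ... rebuild thresholds->spare without the removed event ... */
unlock:
	mutex_unlock(&memcg->thresholds_lock);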
17,125 | __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
struct page_cgroup *pc = lookup_page_cgroup(page);
struct zone *zone = page_zone(page);
unsigned long flags;
bool removed = false;
/*
* In some cases (SwapCache, FUSE's splice_buf->radixtree) the page
* is already on the LRU. That means the page may be on some other
* page_cgroup's LRU. Take care of it.
*/
spin_lock_irqsave(&zone->lru_lock, flags);
if (PageLRU(page)) {
del_page_from_lru_list(zone, page, page_lru(page));
ClearPageLRU(page);
removed = true;
}
__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
if (removed) {
add_page_to_lru_list(zone, page, page_lru(page));
SetPageLRU(page);
}
spin_unlock_irqrestore(&zone->lru_lock, flags);
return;
}
| DoS | 0 | __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
struct page_cgroup *pc = lookup_page_cgroup(page);
struct zone *zone = page_zone(page);
unsigned long flags;
bool removed = false;
/*
* In some cases (SwapCache, FUSE's splice_buf->radixtree) the page
* is already on the LRU. That means the page may be on some other
* page_cgroup's LRU. Take care of it.
*/
spin_lock_irqsave(&zone->lru_lock, flags);
if (PageLRU(page)) {
del_page_from_lru_list(zone, page, page_lru(page));
ClearPageLRU(page);
removed = true;
}
__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
if (removed) {
add_page_to_lru_list(zone, page, page_lru(page));
SetPageLRU(page);
}
spin_unlock_irqrestore(&zone->lru_lock, flags);
return;
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
17,126 | __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
if (mem_cgroup_disabled())
return;
if (!memcg)
return;
cgroup_exclude_rmdir(&memcg->css);
__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
/*
* Now the swap is in memory. This means this page may be
* counted both as mem and swap: a double count.
* Fix it by uncharging from memsw. Basically, this SwapCache is stable
* under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
* may call delete_from_swap_cache() before we reach here.
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
struct mem_cgroup *swap_memcg;
unsigned short id;
id = swap_cgroup_record(ent, 0);
rcu_read_lock();
swap_memcg = mem_cgroup_lookup(id);
if (swap_memcg) {
/*
* This recorded memcg can be an obsolete one, so avoid
* calling css_tryget()
*/
if (!mem_cgroup_is_root(swap_memcg))
res_counter_uncharge(&swap_memcg->memsw,
PAGE_SIZE);
mem_cgroup_swap_statistics(swap_memcg, false);
mem_cgroup_put(swap_memcg);
}
rcu_read_unlock();
}
/*
* At swapin, we may charge the account of a cgroup which has no tasks,
* so rmdir()->pre_destroy() can be called while we do this charge.
* In that case, we need to call pre_destroy() again; check it here.
*/
cgroup_release_and_wakeup_rmdir(&memcg->css);
}
| DoS | 0 | __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
if (mem_cgroup_disabled())
return;
if (!memcg)
return;
cgroup_exclude_rmdir(&memcg->css);
__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
/*
* Now the swap is in memory. This means this page may be
* counted both as mem and swap: a double count.
* Fix it by uncharging from memsw. Basically, this SwapCache is stable
* under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
* may call delete_from_swap_cache() before we reach here.
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
struct mem_cgroup *swap_memcg;
unsigned short id;
id = swap_cgroup_record(ent, 0);
rcu_read_lock();
swap_memcg = mem_cgroup_lookup(id);
if (swap_memcg) {
/*
* This recorded memcg can be an obsolete one, so avoid
* calling css_tryget()
*/
if (!mem_cgroup_is_root(swap_memcg))
res_counter_uncharge(&swap_memcg->memsw,
PAGE_SIZE);
mem_cgroup_swap_statistics(swap_memcg, false);
mem_cgroup_put(swap_memcg);
}
rcu_read_unlock();
}
/*
* At swapin, we may charge the account of a cgroup which has no tasks,
* so rmdir()->pre_destroy() can be called while we do this charge.
* In that case, we need to call pre_destroy() again; check it here.
*/
cgroup_release_and_wakeup_rmdir(&memcg->css);
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
17,127 | static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
kfree(memcg);
else
vfree(memcg);
}
| DoS | 0 | static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
kfree(memcg);
else
vfree(memcg);
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
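The kfree()/vfree() split in __mem_cgroup_free() mirrors the allocation side: when struct mem_cgroup fits in a page it comes from the slab allocator, otherwise from vmalloc, so the free path must ask the same size question. A sketch of the assumed allocation counterpart (names and exact flags are assumptions):

static struct mem_cgroup *mem_cgroup_alloc_sketch(void)
{
	int size = sizeof(struct mem_cgroup);

	/* same size test as the free path above, in reverse */
	if (size < PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);	/* small: slab */
	return vzalloc(size);				/* large: vmalloc */
}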
17,128 | static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->private;
pte_t *pte;
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
if (is_target_pte_for_mc(vma, addr, *pte, NULL))
mc.precharge++; /* increment precharge temporarily */
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
| DoS | 0 | static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->private;
pte_t *pte;
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
if (is_target_pte_for_mc(vma, addr, *pte, NULL))
mc.precharge++; /* increment precharge temporarily */
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
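mem_cgroup_count_precharge_pte_range() is a pmd_entry callback for the generic page-table walker: it splits any huge pmd, then counts ptes that are move-charge candidates into mc.precharge. A sketch of the plausible wiring through walk_page_range() (the surrounding helper is assumed; the mm_walk fields match this kernel era):

static unsigned long count_precharge_vma_sketch(struct vm_area_struct *vma)
{
	struct mm_walk walk = {
		.pmd_entry = mem_cgroup_count_precharge_pte_range,
		.mm        = vma->vm_mm,
		.private   = vma,	/* the callback reads walk->private */
	};

	walk_page_range(vma->vm_start, vma->vm_end, &walk);
	return mc.precharge;
}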
17,129 | struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
enum lru_list lru)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
struct page_cgroup *pc;
if (mem_cgroup_disabled())
return &zone->lruvec;
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
/* compound_order() is stabilized through lru_lock */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
return &mz->lruvec;
}
| DoS | 0 | struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
enum lru_list lru)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
struct page_cgroup *pc;
if (mem_cgroup_disabled())
return &zone->lruvec;
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
/* compound_order() is stabilized through lru_lock */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
return &mz->lruvec;
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
17,130 | int mem_cgroup_prepare_migration(struct page *page,
struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type ctype;
int ret = 0;
*memcgp = NULL;
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return 0;
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
memcg = pc->mem_cgroup;
css_get(&memcg->css);
/*
* When migrating an anonymous page, its mapcount goes down
* to 0 and uncharge() will be called. But even if it's fully
* unmapped, migration may fail and this page would have to be
* charged again. We set the MIGRATION flag here and delay the
* uncharge until end_migration() is called.
*
* Corner Case Thinking
* A)
* When the old page was mapped as Anon and it was unmapped and
* freed while migration was ongoing.
* If unmap finds the old page, uncharge() of it will be delayed
* until end_migration(). If unmap finds a new page, it's
* uncharged when it makes the mapcount go 1->0. If unmap code
* finds swap_migration_entry, the new page will not be mapped
* and end_migration() will find it (mapcount == 0).
*
* B)
* When the old page was mapped but migration fails, the kernel
* remaps it. A charge for it is kept by the MIGRATION flag even
* if mapcount goes down to 0. We can do remap successfully
* without charging it again.
*
* C)
* The "old" page is under lock_page() until the end of
* migration, so the old page itself will not be swapped out.
* If the new page is swapped out before end_migration, our
* hook to usual swap-out path will catch the event.
*/
if (PageAnon(page))
SetPageCgroupMigration(pc);
}
unlock_page_cgroup(pc);
/*
* If the page is not charged at this point,
* we return here.
*/
if (!memcg)
return 0;
*memcgp = memcg;
ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
css_put(&memcg->css);/* drop extra refcnt */
if (ret) {
if (PageAnon(page)) {
lock_page_cgroup(pc);
ClearPageCgroupMigration(pc);
unlock_page_cgroup(pc);
/*
* The old page may be fully unmapped while we kept it.
*/
mem_cgroup_uncharge_page(page);
}
/* we'll need to revisit this error code (we have -EINTR) */
return -ENOMEM;
}
/*
* We charge new page before it's used/mapped. So, even if unlock_page()
* is called before end_migration, we can catch all events on this new
* page. In the case new page is migrated but not remapped, new page's
* mapcount will be finally 0 and we call uncharge in end_migration().
*/
pc = lookup_page_cgroup(newpage);
if (PageAnon(page))
ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
else if (page_is_file_cache(page))
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
return ret;
}
| DoS | 0 | int mem_cgroup_prepare_migration(struct page *page,
struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type ctype;
int ret = 0;
*memcgp = NULL;
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return 0;
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
memcg = pc->mem_cgroup;
css_get(&memcg->css);
/*
* When migrating an anonymous page, its mapcount goes down
* to 0 and uncharge() will be called. But, even if it's fully
* unmapped, migration may fail and this page has to be
* charged again. We set MIGRATION flag here and delay uncharge
* until end_migration() is called
*
* Corner Case Thinking
* A)
* When the old page was mapped as Anon and it was unmapped and
* freed while migration was ongoing.
* If unmap finds the old page, uncharge() of it will be delayed
* until end_migration(). If unmap finds a new page, it's
* uncharged when it makes the mapcount go 1->0. If unmap code
* finds swap_migration_entry, the new page will not be mapped
* and end_migration() will find it (mapcount == 0).
*
* B)
* When the old page was mapped but migration fails, the kernel
* remaps it. A charge for it is kept by the MIGRATION flag even
* if mapcount goes down to 0. We can do remap successfully
* without charging it again.
*
* C)
* The "old" page is under lock_page() until the end of
* migration, so the old page itself will not be swapped out.
* If the new page is swapped out before end_migration, our
* hook to usual swap-out path will catch the event.
*/
if (PageAnon(page))
SetPageCgroupMigration(pc);
}
unlock_page_cgroup(pc);
/*
* If the page is not charged at this point,
* we return here.
*/
if (!memcg)
return 0;
*memcgp = memcg;
ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
css_put(&memcg->css);/* drop extra refcnt */
if (ret) {
if (PageAnon(page)) {
lock_page_cgroup(pc);
ClearPageCgroupMigration(pc);
unlock_page_cgroup(pc);
/*
* The old page may be fully unmapped while we kept it.
*/
mem_cgroup_uncharge_page(page);
}
/* we'll need to revisit this error code (we have -EINTR) */
return -ENOMEM;
}
/*
* We charge new page before it's used/mapped. So, even if unlock_page()
* is called before end_migration, we can catch all events on this new
* page. In the case new page is migrated but not remapped, new page's
* mapcount will be finally 0 and we call uncharge in end_migration().
*/
pc = lookup_page_cgroup(newpage);
if (PageAnon(page))
ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
else if (page_is_file_cache(page))
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
return ret;
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
17,131 | void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
struct mem_cgroup *memcg;
struct page_cgroup *pc;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(oldpage);
/* fix accounting on old pages */
lock_page_cgroup(pc);
memcg = pc->mem_cgroup;
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
ClearPageCgroupUsed(pc);
unlock_page_cgroup(pc);
if (PageSwapBacked(oldpage))
type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
/*
* Even if newpage->mapping was NULL before starting replacement,
* the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
* LRU while we overwrite pc->mem_cgroup.
*/
__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
}
| DoS | 0 | void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
struct mem_cgroup *memcg;
struct page_cgroup *pc;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(oldpage);
/* fix accounting on old pages */
lock_page_cgroup(pc);
memcg = pc->mem_cgroup;
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
ClearPageCgroupUsed(pc);
unlock_page_cgroup(pc);
if (PageSwapBacked(oldpage))
type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
/*
* Even if newpage->mapping was NULL before starting replacement,
* the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
* LRU while we overwrite pc->mem_cgroup.
*/
__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
}
| @@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
| null | null | null |
17,132 | static int alignfile(struct file *file, loff_t *foffset)
{
static const char buf[4] = { 0, };
DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
return 1;
}
| DoS | 0 | static int alignfile(struct file *file, loff_t *foffset)
{
static const char buf[4] = { 0, };
DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
return 1;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
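alignfile() pads the notes area out to the next 4-byte boundary, since ELF note records are word-aligned. The arithmetic is easy to check in isolation; a small sketch, assuming the usual roundup() definition and a hypothetical file offset:

#include <stdio.h>

#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	long long foffset = 1419;	/* hypothetical current file offset */
	long long pad = roundup(foffset, 4) - foffset;

	/* alignfile() would DUMP_WRITE this many zero bytes. */
	printf("pad %lld zero bytes to reach offset %lld\n",
	       pad, foffset + pad);	/* pad 1 -> 1420 */
	return 0;
}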
17,133 | create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
unsigned long load_addr, unsigned long interp_load_addr)
{
unsigned long p = bprm->p;
int argc = bprm->argc;
int envc = bprm->envc;
elf_addr_t __user *argv;
elf_addr_t __user *envp;
elf_addr_t __user *sp;
elf_addr_t __user *u_platform;
elf_addr_t __user *u_base_platform;
elf_addr_t __user *u_rand_bytes;
const char *k_platform = ELF_PLATFORM;
const char *k_base_platform = ELF_BASE_PLATFORM;
unsigned char k_rand_bytes[16];
int items;
elf_addr_t *elf_info;
int ei_index = 0;
const struct cred *cred = current_cred();
struct vm_area_struct *vma;
/*
* In some cases (e.g. Hyper-Threading), we want to avoid L1
* evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them.
*/
p = arch_align_stack(p);
/*
* If this architecture has a platform capability string, copy it
* to userspace. In some cases (Sparc), this info is impossible
* for userspace to get any other way, in others (i386) it is
* merely difficult.
*/
u_platform = NULL;
if (k_platform) {
size_t len = strlen(k_platform) + 1;
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_platform, k_platform, len))
return -EFAULT;
}
/*
* If this architecture has a "base" platform capability
* string, copy it to userspace.
*/
u_base_platform = NULL;
if (k_base_platform) {
size_t len = strlen(k_base_platform) + 1;
u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_base_platform, k_base_platform, len))
return -EFAULT;
}
/*
* Generate 16 random bytes for userspace PRNG seeding.
*/
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
u_rand_bytes = (elf_addr_t __user *)
STACK_ALLOC(p, sizeof(k_rand_bytes));
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
/* Create the ELF interpreter info */
elf_info = (elf_addr_t *)current->mm->saved_auxv;
/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
do { \
elf_info[ei_index++] = id; \
elf_info[ei_index++] = val; \
} while (0)
#ifdef ARCH_DLINFO
/*
* ARCH_DLINFO must come first so PPC can do its special alignment of
* AUXV.
* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
* ARCH_DLINFO changes
*/
ARCH_DLINFO;
#endif
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
NEW_AUX_ENT(AT_UID, cred->uid);
NEW_AUX_ENT(AT_EUID, cred->euid);
NEW_AUX_ENT(AT_GID, cred->gid);
NEW_AUX_ENT(AT_EGID, cred->egid);
NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
if (k_platform) {
NEW_AUX_ENT(AT_PLATFORM,
(elf_addr_t)(unsigned long)u_platform);
}
if (k_base_platform) {
NEW_AUX_ENT(AT_BASE_PLATFORM,
(elf_addr_t)(unsigned long)u_base_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
memset(&elf_info[ei_index], 0,
sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
/* And advance past the AT_NULL entry. */
ei_index += 2;
sp = STACK_ADD(p, ei_index);
items = (argc + 1) + (envc + 1) + 1;
bprm->p = STACK_ROUND(sp, items);
/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
sp = (elf_addr_t __user *)bprm->p;
#endif
/*
* Grow the stack manually; some architectures have a limit on how
* far ahead a user-space access may be in order to grow the stack.
*/
vma = find_extend_vma(current->mm, bprm->p);
if (!vma)
return -EFAULT;
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
if (__put_user(argc, sp++))
return -EFAULT;
argv = sp;
envp = argv + argc + 1;
/* Populate argv and envp */
p = current->mm->arg_end = current->mm->arg_start;
while (argc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, argv++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
if (__put_user(0, argv))
return -EFAULT;
current->mm->arg_end = current->mm->env_start = p;
while (envc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, envp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
if (__put_user(0, envp))
return -EFAULT;
current->mm->env_end = p;
/* Put the elf_info on the stack in the right place. */
sp = (elf_addr_t __user *)envp + 1;
if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
return -EFAULT;
return 0;
}
| DoS | 0 | create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
unsigned long load_addr, unsigned long interp_load_addr)
{
unsigned long p = bprm->p;
int argc = bprm->argc;
int envc = bprm->envc;
elf_addr_t __user *argv;
elf_addr_t __user *envp;
elf_addr_t __user *sp;
elf_addr_t __user *u_platform;
elf_addr_t __user *u_base_platform;
elf_addr_t __user *u_rand_bytes;
const char *k_platform = ELF_PLATFORM;
const char *k_base_platform = ELF_BASE_PLATFORM;
unsigned char k_rand_bytes[16];
int items;
elf_addr_t *elf_info;
int ei_index = 0;
const struct cred *cred = current_cred();
struct vm_area_struct *vma;
/*
* In some cases (e.g. Hyper-Threading), we want to avoid L1
* evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them.
*/
p = arch_align_stack(p);
/*
* If this architecture has a platform capability string, copy it
* to userspace. In some cases (Sparc), this info is impossible
* for userspace to get any other way, in others (i386) it is
* merely difficult.
*/
u_platform = NULL;
if (k_platform) {
size_t len = strlen(k_platform) + 1;
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_platform, k_platform, len))
return -EFAULT;
}
/*
* If this architecture has a "base" platform capability
* string, copy it to userspace.
*/
u_base_platform = NULL;
if (k_base_platform) {
size_t len = strlen(k_base_platform) + 1;
u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
if (__copy_to_user(u_base_platform, k_base_platform, len))
return -EFAULT;
}
/*
* Generate 16 random bytes for userspace PRNG seeding.
*/
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
u_rand_bytes = (elf_addr_t __user *)
STACK_ALLOC(p, sizeof(k_rand_bytes));
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
/* Create the ELF interpreter info */
elf_info = (elf_addr_t *)current->mm->saved_auxv;
/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
do { \
elf_info[ei_index++] = id; \
elf_info[ei_index++] = val; \
} while (0)
#ifdef ARCH_DLINFO
/*
* ARCH_DLINFO must come first so PPC can do its special alignment of
* AUXV.
* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
* ARCH_DLINFO changes
*/
ARCH_DLINFO;
#endif
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
NEW_AUX_ENT(AT_UID, cred->uid);
NEW_AUX_ENT(AT_EUID, cred->euid);
NEW_AUX_ENT(AT_GID, cred->gid);
NEW_AUX_ENT(AT_EGID, cred->egid);
NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
if (k_platform) {
NEW_AUX_ENT(AT_PLATFORM,
(elf_addr_t)(unsigned long)u_platform);
}
if (k_base_platform) {
NEW_AUX_ENT(AT_BASE_PLATFORM,
(elf_addr_t)(unsigned long)u_base_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
memset(&elf_info[ei_index], 0,
sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
/* And advance past the AT_NULL entry. */
ei_index += 2;
sp = STACK_ADD(p, ei_index);
items = (argc + 1) + (envc + 1) + 1;
bprm->p = STACK_ROUND(sp, items);
/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
sp = (elf_addr_t __user *)bprm->p;
#endif
/*
* Grow the stack manually; some architectures have a limit on how
* far ahead a user-space access may be in order to grow the stack.
*/
vma = find_extend_vma(current->mm, bprm->p);
if (!vma)
return -EFAULT;
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
if (__put_user(argc, sp++))
return -EFAULT;
argv = sp;
envp = argv + argc + 1;
/* Populate argv and envp */
p = current->mm->arg_end = current->mm->arg_start;
while (argc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, argv++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
if (__put_user(0, argv))
return -EFAULT;
current->mm->arg_end = current->mm->env_start = p;
while (envc-- > 0) {
size_t len;
if (__put_user((elf_addr_t)p, envp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
if (__put_user(0, envp))
return -EFAULT;
current->mm->env_end = p;
/* Put the elf_info on the stack in the right place. */
sp = (elf_addr_t __user *)envp + 1;
if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
return -EFAULT;
return 0;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
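The NEW_AUX_ENT pattern in create_elf_tables() lays the auxiliary vector out as a flat array of (id, value) pairs terminated by an AT_NULL pair. A self-contained sketch of the same layout, using the standard AT_PAGESZ and AT_ENTRY ids with made-up values:

#include <stdio.h>

#define AT_NULL		0
#define AT_PAGESZ	6
#define AT_ENTRY	9

int main(void)
{
	unsigned long elf_info[16];
	int ei_index = 0;

/* Mirrors the kernel macro: each entry appends an id, then a value. */
#define NEW_AUX_ENT(id, val)			\
	do {					\
		elf_info[ei_index++] = (id);	\
		elf_info[ei_index++] = (val);	\
	} while (0)

	NEW_AUX_ENT(AT_PAGESZ, 4096);
	NEW_AUX_ENT(AT_ENTRY, 0x400000);	/* hypothetical entry point */
	NEW_AUX_ENT(AT_NULL, 0);		/* terminator consumers stop at */
#undef NEW_AUX_ENT

	for (int i = 0; elf_info[i] != AT_NULL; i += 2)
		printf("id %lu = 0x%lx\n", elf_info[i], elf_info[i + 1]);
	return 0;
}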
17,134 | static void do_thread_regset_writeback(struct task_struct *task,
const struct user_regset *regset)
{
if (regset->writeback)
regset->writeback(task, regset, 1);
}
| DoS | 0 | static void do_thread_regset_writeback(struct task_struct *task,
const struct user_regset *regset)
{
if (regset->writeback)
regset->writeback(task, regset, 1);
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
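The patch on this record is the heart of the fix: fill_thread_core_info() must skip regsets that define a core_note_type but no ->get handler, otherwise the dump path calls a NULL function pointer (the DoS). A userspace sketch of the guarded loop, with simplified stand-in types for struct user_regset:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct regset {
	unsigned int core_note_type;
	int (*get)(void *buf, size_t size);	/* may legitimately be NULL */
	void (*writeback)(int immediate);	/* may be NULL */
};

static int fill_thread_notes(const struct regset *regsets, size_t n)
{
	char buf[64];
	size_t i;

	for (i = 0; i < n; i++) {
		const struct regset *r = &regsets[i];

		if (r->writeback)
			r->writeback(1);
		/* The "r->get" test is what the patch adds. */
		if (r->core_note_type && r->get) {
			if (r->get(buf, sizeof(buf)))
				return -1;
			printf("dumped note type %u\n", r->core_note_type);
		}
	}
	return 0;
}

static int get_gpr(void *buf, size_t sz) { memset(buf, 0, sz); return 0; }
static void wb_only(int now) { (void)now; }

int main(void)
{
	const struct regset view[] = {
		{ .core_note_type = 1, .get = get_gpr },	/* NT_PRSTATUS-like */
		{ .core_note_type = 2, .writeback = wb_only },	/* no ->get: skipped */
	};

	return fill_thread_notes(view, 2) ? 1 : 0;
}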
17,135 | static int elf_core_dump(struct coredump_params *cprm)
{
int has_dumped = 0;
mm_segment_t fs;
int segs;
size_t size = 0;
struct vm_area_struct *vma, *gate_vma;
struct elfhdr *elf = NULL;
loff_t offset = 0, dataoff, foffset;
struct elf_note_info info;
struct elf_phdr *phdr4note = NULL;
struct elf_shdr *shdr4extnum = NULL;
Elf_Half e_phnum;
elf_addr_t e_shoff;
/*
* We no longer stop all VM operations.
*
* This is because those processes that could possibly change map_count
* or the mmap / vma pages are now blocked in do_exit on current
* finishing this core dump.
*
* Only ptrace can touch these memory addresses, but it doesn't change
* the map_count or the pages allocated. So no possibility of crashing
* exists while dumping the mm->vm_next areas to the core file.
*/
/* alloc memory for large data structures: too large to be on stack */
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
/*
* The number of segs is recorded in the ELF header as a 16-bit value.
* Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
*/
segs = current->mm->map_count;
segs += elf_core_extra_phdrs();
gate_vma = get_gate_vma(current->mm);
if (gate_vma != NULL)
segs++;
/* for notes section */
segs++;
/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
* this, kernel supports extended numbering. Have a look at
* include/linux/elf.h for further information. */
e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
/*
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs))
goto cleanup;
has_dumped = 1;
current->flags |= PF_DUMPCORE;
fs = get_fs();
set_fs(KERNEL_DS);
offset += sizeof(*elf); /* Elf header */
offset += segs * sizeof(struct elf_phdr); /* Program headers */
foffset = offset;
/* Write notes phdr entry */
{
size_t sz = get_note_info_size(&info);
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
if (!phdr4note)
goto end_coredump;
fill_elf_note_phdr(phdr4note, sz, offset);
offset += sz;
}
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
offset += elf_core_extra_data_size();
e_shoff = offset;
if (e_phnum == PN_XNUM) {
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
if (!shdr4extnum)
goto end_coredump;
fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
size += sizeof(*elf);
if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
goto end_coredump;
size += sizeof(*phdr4note);
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
/* Write program headers for segments dump */
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
struct elf_phdr phdr;
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
if (vma->vm_flags & VM_WRITE)
phdr.p_flags |= PF_W;
if (vma->vm_flags & VM_EXEC)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
}
if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
goto end_coredump;
/* write out the notes section */
if (!write_note_info(&info, cprm->file, &foffset))
goto end_coredump;
if (elf_coredump_extra_notes_write(cprm->file, &foffset))
goto end_coredump;
/* Align to page */
if (!dump_seek(cprm->file, dataoff - foffset))
goto end_coredump;
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
unsigned long addr;
unsigned long end;
end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
int stop;
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
kunmap(page);
page_cache_release(page);
} else
stop = !dump_seek(cprm->file, PAGE_SIZE);
if (stop)
goto end_coredump;
}
}
if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
goto end_coredump;
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
goto end_coredump;
}
end_coredump:
set_fs(fs);
cleanup:
free_note_info(&info);
kfree(shdr4extnum);
kfree(phdr4note);
kfree(elf);
out:
return has_dumped;
}
| DoS | 0 | static int elf_core_dump(struct coredump_params *cprm)
{
int has_dumped = 0;
mm_segment_t fs;
int segs;
size_t size = 0;
struct vm_area_struct *vma, *gate_vma;
struct elfhdr *elf = NULL;
loff_t offset = 0, dataoff, foffset;
struct elf_note_info info;
struct elf_phdr *phdr4note = NULL;
struct elf_shdr *shdr4extnum = NULL;
Elf_Half e_phnum;
elf_addr_t e_shoff;
/*
* We no longer stop all VM operations.
*
* This is because those processes that could possibly change map_count
* or the mmap / vma pages are now blocked in do_exit on current
* finishing this core dump.
*
* Only ptrace can touch these memory addresses, but it doesn't change
* the map_count or the pages allocated. So no possibility of crashing
* exists while dumping the mm->vm_next areas to the core file.
*/
/* alloc memory for large data structures: too large to be on stack */
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
goto out;
/*
* The number of segs is recorded in the ELF header as a 16-bit value.
* Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
*/
segs = current->mm->map_count;
segs += elf_core_extra_phdrs();
gate_vma = get_gate_vma(current->mm);
if (gate_vma != NULL)
segs++;
/* for notes section */
segs++;
/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
* this, kernel supports extended numbering. Have a look at
* include/linux/elf.h for further information. */
e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
/*
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs))
goto cleanup;
has_dumped = 1;
current->flags |= PF_DUMPCORE;
fs = get_fs();
set_fs(KERNEL_DS);
offset += sizeof(*elf); /* Elf header */
offset += segs * sizeof(struct elf_phdr); /* Program headers */
foffset = offset;
/* Write notes phdr entry */
{
size_t sz = get_note_info_size(&info);
sz += elf_coredump_extra_notes_size();
phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
if (!phdr4note)
goto end_coredump;
fill_elf_note_phdr(phdr4note, sz, offset);
offset += sz;
}
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
offset += elf_core_extra_data_size();
e_shoff = offset;
if (e_phnum == PN_XNUM) {
shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
if (!shdr4extnum)
goto end_coredump;
fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
}
offset = dataoff;
size += sizeof(*elf);
if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
goto end_coredump;
size += sizeof(*phdr4note);
if (size > cprm->limit
|| !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
goto end_coredump;
/* Write program headers for segments dump */
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
struct elf_phdr phdr;
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
phdr.p_memsz = vma->vm_end - vma->vm_start;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
if (vma->vm_flags & VM_WRITE)
phdr.p_flags |= PF_W;
if (vma->vm_flags & VM_EXEC)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
size += sizeof(phdr);
if (size > cprm->limit
|| !dump_write(cprm->file, &phdr, sizeof(phdr)))
goto end_coredump;
}
if (!elf_core_write_extra_phdrs(cprm->file, offset, &size, cprm->limit))
goto end_coredump;
/* write out the notes section */
if (!write_note_info(&info, cprm->file, &foffset))
goto end_coredump;
if (elf_coredump_extra_notes_write(cprm->file, &foffset))
goto end_coredump;
/* Align to page */
if (!dump_seek(cprm->file, dataoff - foffset))
goto end_coredump;
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma)) {
unsigned long addr;
unsigned long end;
end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
struct page *page;
int stop;
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
stop = ((size += PAGE_SIZE) > cprm->limit) ||
!dump_write(cprm->file, kaddr,
PAGE_SIZE);
kunmap(page);
page_cache_release(page);
} else
stop = !dump_seek(cprm->file, PAGE_SIZE);
if (stop)
goto end_coredump;
}
}
if (!elf_core_write_extra_data(cprm->file, &size, cprm->limit))
goto end_coredump;
if (e_phnum == PN_XNUM) {
size += sizeof(*shdr4extnum);
if (size > cprm->limit
|| !dump_write(cprm->file, shdr4extnum,
sizeof(*shdr4extnum)))
goto end_coredump;
}
end_coredump:
set_fs(fs);
cleanup:
free_note_info(&info);
kfree(shdr4extnum);
kfree(phdr4note);
kfree(elf);
out:
return has_dumped;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
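elf_core_dump() threads a running byte count through every write and bails to end_coredump once it would exceed cprm->limit (the RLIMIT_CORE value). A userspace sketch of that accounting discipline, with stdio standing in for dump_write() and a made-up 100-byte limit:

#include <stdio.h>

static size_t size;	/* bytes accounted so far */

static int dump_write(FILE *f, const void *buf, size_t len, size_t limit)
{
	size += len;
	if (size > limit)
		return 0;	/* caller jumps to end_coredump */
	return fwrite(buf, 1, len, f) == len;
}

int main(void)
{
	const char hdr[64] = "ELF-header-stand-in";
	size_t limit = 100;	/* hypothetical RLIMIT_CORE */
	FILE *f = fopen("core.sketch", "wb");

	if (!f)
		return 1;
	if (!dump_write(f, hdr, sizeof(hdr), limit))
		goto end_coredump;
	if (!dump_write(f, hdr, sizeof(hdr), limit))	/* 128 > 100: stops */
		goto end_coredump;
end_coredump:
	fclose(f);
	printf("accounted %zu bytes against limit %zu\n", size, limit);
	return 0;
}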
17,136 | static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
unsigned long mm_flags)
{
struct vm_area_struct *vma;
size_t size = 0;
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma))
size += vma_dump_size(vma, mm_flags);
return size;
}
| DoS | 0 | static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
unsigned long mm_flags)
{
struct vm_area_struct *vma;
size_t size = 0;
for (vma = first_vma(current, gate_vma); vma != NULL;
vma = next_vma(vma, gate_vma))
size += vma_dump_size(vma, mm_flags);
return size;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,137 | static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
int sz = 0;
struct task_struct *p = t->thread;
t->num_notes = 0;
fill_prstatus(&t->prstatus, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
&(t->prstatus));
t->num_notes++;
sz += notesize(&t->notes[0]);
if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
&t->fpu))) {
fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
&(t->fpu));
t->num_notes++;
sz += notesize(&t->notes[1]);
}
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
sizeof(t->xfpu), &t->xfpu);
t->num_notes++;
sz += notesize(&t->notes[2]);
}
#endif
return sz;
}
| DoS | 0 | static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
int sz = 0;
struct task_struct *p = t->thread;
t->num_notes = 0;
fill_prstatus(&t->prstatus, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
&(t->prstatus));
t->num_notes++;
sz += notesize(&t->notes[0]);
if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
&t->fpu))) {
fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
&(t->fpu));
t->num_notes++;
sz += notesize(&t->notes[1]);
}
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
sizeof(t->xfpu), &t->xfpu);
t->num_notes++;
sz += notesize(&t->notes[2]);
}
#endif
return sz;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,138 | static unsigned long elf_map(struct file *filep, unsigned long addr,
struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
{
unsigned long map_addr;
unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
addr = ELF_PAGESTART(addr);
size = ELF_PAGEALIGN(size);
/* mmap() will return -EINVAL if given a zero size, but a
* segment with zero filesize is perfectly valid */
if (!size)
return addr;
down_write(¤t->mm->mmap_sem);
/*
* total_size is the size of the ELF (interpreter) image.
* The _first_ mmap needs to know the full size, otherwise
* randomization might put this image into an overlapping
* position with the ELF binary image. (since size < total_size)
* So we first map the 'big' image - and unmap the remainder at
* the end. (which unmap is needed for ELF images with holes.)
*/
if (total_size) {
total_size = ELF_PAGEALIGN(total_size);
map_addr = do_mmap(filep, addr, total_size, prot, type, off);
if (!BAD_ADDR(map_addr))
do_munmap(current->mm, map_addr+size, total_size-size);
} else
map_addr = do_mmap(filep, addr, size, prot, type, off);
up_write(¤t->mm->mmap_sem);
return map_addr;
}
| DoS | 0 | static unsigned long elf_map(struct file *filep, unsigned long addr,
struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
{
unsigned long map_addr;
unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
addr = ELF_PAGESTART(addr);
size = ELF_PAGEALIGN(size);
/* mmap() will return -EINVAL if given a zero size, but a
* segment with zero filesize is perfectly valid */
if (!size)
return addr;
down_write(¤t->mm->mmap_sem);
/*
* total_size is the size of the ELF (interpreter) image.
* The _first_ mmap needs to know the full size, otherwise
* randomization might put this image into an overlapping
* position with the ELF binary image. (since size < total_size)
* So we first map the 'big' image - and unmap the remainder at
* the end. (which unmap is needed for ELF images with holes.)
*/
if (total_size) {
total_size = ELF_PAGEALIGN(total_size);
map_addr = do_mmap(filep, addr, total_size, prot, type, off);
if (!BAD_ADDR(map_addr))
do_munmap(current->mm, map_addr+size, total_size-size);
} else
map_addr = do_mmap(filep, addr, size, prot, type, off);
up_write(¤t->mm->mmap_sem);
return map_addr;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
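elf_map() rolls the mapping address, size, and file offset back by the segment's in-page offset so that p_vaddr still receives the bytes at p_offset. The rounding is easy to verify standalone; a sketch assuming a 4 KiB ELF_MIN_ALIGN and a hypothetical program header:

#include <stdio.h>

#define ELF_MIN_ALIGN		4096UL
#define ELF_PAGESTART(v)	((v) & ~(ELF_MIN_ALIGN - 1))
#define ELF_PAGEOFFSET(v)	((v) & (ELF_MIN_ALIGN - 1))
#define ELF_PAGEALIGN(v)	(((v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
	/* Hypothetical segment starting 0x234 bytes into a page. */
	unsigned long p_vaddr = 0x401234, p_offset = 0x1234, p_filesz = 0x5000;

	unsigned long addr = ELF_PAGESTART(p_vaddr);
	unsigned long size = ELF_PAGEALIGN(p_filesz + ELF_PAGEOFFSET(p_vaddr));
	unsigned long off  = p_offset - ELF_PAGEOFFSET(p_vaddr);

	/* addr and off are rolled back by the same in-page offset, so the
	 * file bytes still land at the requested virtual address. */
	printf("mmap addr=0x%lx size=0x%lx off=0x%lx\n", addr, size, off);
	return 0;	/* prints addr=0x401000 size=0x6000 off=0x1000 */
}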
17,139 | static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
do
i += 2;
while (auxv[i - 2] != AT_NULL);
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
| DoS | 0 | static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
int i = 0;
do
i += 2;
while (auxv[i - 2] != AT_NULL);
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
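fill_auxv_note() sizes the saved auxv by stepping two words at a time until the pair just consumed carried AT_NULL, so the terminator itself is included in the note. The same walk over a hand-built auxv (the non-zero ids below are the standard AT_PAGESZ and AT_ENTRY, values invented):

#include <stdio.h>

#define AT_NULL 0

int main(void)
{
	/* (id, value) pairs, ending with an AT_NULL pair. */
	unsigned long auxv[] = { 6, 4096, 9, 0x400000, AT_NULL, 0 };
	int i = 0;

	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);

	printf("note payload: %zu bytes (%d words)\n",
	       i * sizeof(unsigned long), i);	/* 48 bytes, 6 words on LP64 */
	return 0;
}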
17,140 | static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
phdr->p_vaddr = 0;
phdr->p_paddr = 0;
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
phdr->p_align = 0;
return;
}
| DoS | 0 | static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
phdr->p_vaddr = 0;
phdr->p_paddr = 0;
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
phdr->p_align = 0;
return;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,141 | static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
elf_addr_t e_shoff, int segs)
{
elf->e_shoff = e_shoff;
elf->e_shentsize = sizeof(*shdr4extnum);
elf->e_shnum = 1;
elf->e_shstrndx = SHN_UNDEF;
memset(shdr4extnum, 0, sizeof(*shdr4extnum));
shdr4extnum->sh_type = SHT_NULL;
shdr4extnum->sh_size = elf->e_shnum;
shdr4extnum->sh_link = elf->e_shstrndx;
shdr4extnum->sh_info = segs;
}
| DoS | 0 | static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
elf_addr_t e_shoff, int segs)
{
elf->e_shoff = e_shoff;
elf->e_shentsize = sizeof(*shdr4extnum);
elf->e_shnum = 1;
elf->e_shstrndx = SHN_UNDEF;
memset(shdr4extnum, 0, sizeof(*shdr4extnum));
shdr4extnum->sh_type = SHT_NULL;
shdr4extnum->sh_size = elf->e_shnum;
shdr4extnum->sh_link = elf->e_shstrndx;
shdr4extnum->sh_info = segs;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,142 | static void fill_note(struct memelfnote *note, const char *name, int type,
unsigned int sz, void *data)
{
note->name = name;
note->type = type;
note->datasz = sz;
note->data = data;
return;
}
| DoS | 0 | static void fill_note(struct memelfnote *note, const char *name, int type,
unsigned int sz, void *data)
{
note->name = name;
note->type = type;
note->datasz = sz;
note->data = data;
return;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,143 | static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
long signr, struct pt_regs *regs)
{
struct task_struct *dump_task = current;
const struct user_regset_view *view = task_user_regset_view(dump_task);
struct elf_thread_core_info *t;
struct elf_prpsinfo *psinfo;
struct core_thread *ct;
unsigned int i;
info->size = 0;
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
if (psinfo == NULL)
return 0;
fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
/*
* Figure out how many notes we're going to need for each thread.
*/
info->thread_notes = 0;
for (i = 0; i < view->n; ++i)
if (view->regsets[i].core_note_type != 0)
++info->thread_notes;
/*
* Sanity check. We rely on regset 0 being in NT_PRSTATUS,
* since it is our one special case.
*/
if (unlikely(info->thread_notes == 0) ||
unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
WARN_ON(1);
return 0;
}
/*
* Initialize the ELF file header.
*/
fill_elf_header(elf, phdrs,
view->e_machine, view->e_flags, view->ei_osabi);
/*
* Allocate a structure for each thread.
*/
for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
t = kzalloc(offsetof(struct elf_thread_core_info,
notes[info->thread_notes]),
GFP_KERNEL);
if (unlikely(!t))
return 0;
t->task = ct->task;
if (ct->task == dump_task || !info->thread) {
t->next = info->thread;
info->thread = t;
} else {
/*
* Make sure to keep the original task at
* the head of the list.
*/
t->next = info->thread->next;
info->thread->next = t;
}
}
/*
* Now fill in each thread's information.
*/
for (t = info->thread; t != NULL; t = t->next)
if (!fill_thread_core_info(t, view, signr, &info->size))
return 0;
/*
* Fill in the two process-wide notes.
*/
fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
info->size += notesize(&info->psinfo);
fill_auxv_note(&info->auxv, current->mm);
info->size += notesize(&info->auxv);
return 1;
}
| DoS | 0 | static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
long signr, struct pt_regs *regs)
{
struct task_struct *dump_task = current;
const struct user_regset_view *view = task_user_regset_view(dump_task);
struct elf_thread_core_info *t;
struct elf_prpsinfo *psinfo;
struct core_thread *ct;
unsigned int i;
info->size = 0;
info->thread = NULL;
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
if (psinfo == NULL)
return 0;
fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
/*
* Figure out how many notes we're going to need for each thread.
*/
info->thread_notes = 0;
for (i = 0; i < view->n; ++i)
if (view->regsets[i].core_note_type != 0)
++info->thread_notes;
/*
* Sanity check. We rely on regset 0 being in NT_PRSTATUS,
* since it is our one special case.
*/
if (unlikely(info->thread_notes == 0) ||
unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
WARN_ON(1);
return 0;
}
/*
* Initialize the ELF file header.
*/
fill_elf_header(elf, phdrs,
view->e_machine, view->e_flags, view->ei_osabi);
/*
* Allocate a structure for each thread.
*/
for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
t = kzalloc(offsetof(struct elf_thread_core_info,
notes[info->thread_notes]),
GFP_KERNEL);
if (unlikely(!t))
return 0;
t->task = ct->task;
if (ct->task == dump_task || !info->thread) {
t->next = info->thread;
info->thread = t;
} else {
/*
* Make sure to keep the original task at
* the head of the list.
*/
t->next = info->thread->next;
info->thread->next = t;
}
}
/*
* Now fill in each thread's information.
*/
for (t = info->thread; t != NULL; t = t->next)
if (!fill_thread_core_info(t, view, signr, &info->size))
return 0;
/*
* Fill in the two process-wide notes.
*/
fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
info->size += notesize(&info->psinfo);
fill_auxv_note(&info->auxv, current->mm);
info->size += notesize(&info->auxv);
return 1;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
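The list handling in this regset-based fill_note_info() has one invariant: the dumping task stays at the head even though core_state->dumper can deliver the threads in any order, so every other thread is spliced in right after the head. A userspace sketch of that insertion discipline, with a simplified thread record:

#include <stdio.h>
#include <stdlib.h>

struct tinfo {
	int tid;
	int is_dump_task;
	struct tinfo *next;
};

static struct tinfo *head;

static void add_thread(int tid, int is_dump_task)
{
	struct tinfo *t = calloc(1, sizeof(*t));

	if (!t)
		exit(1);
	t->tid = tid;
	t->is_dump_task = is_dump_task;
	if (is_dump_task || !head) {
		t->next = head;		/* dump task (or first entry) leads */
		head = t;
	} else {
		t->next = head->next;	/* everyone else goes second */
		head->next = t;
	}
}

int main(void)
{
	add_thread(101, 0);
	add_thread(100, 1);	/* dump task arrives late, still ends up first */
	add_thread(102, 0);

	for (struct tinfo *t = head; t; t = t->next)
		printf("tid %d%s\n", t->tid,
		       t->is_dump_task ? " (dump task)" : "");
	return 0;	/* prints 100, 102, 101 */
}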
17,144 | static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
long signr, struct pt_regs *regs)
{
struct list_head *t;
if (!elf_note_info_init(info))
return 0;
if (signr) {
struct core_thread *ct;
struct elf_thread_status *ets;
for (ct = current->mm->core_state->dumper.next;
ct; ct = ct->next) {
ets = kzalloc(sizeof(*ets), GFP_KERNEL);
if (!ets)
return 0;
ets->thread = ct->task;
list_add(&ets->list, &info->thread_list);
}
list_for_each(t, &info->thread_list) {
int sz;
ets = list_entry(t, struct elf_thread_status, list);
sz = elf_dump_thread_status(signr, ets);
info->thread_status_size += sz;
}
}
/* now collect the dump for the current */
memset(info->prstatus, 0, sizeof(*info->prstatus));
fill_prstatus(info->prstatus, current, signr);
elf_core_copy_regs(&info->prstatus->pr_reg, regs);
/* Set up header */
fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);
/*
* Set up the notes in similar form to SVR4 core dumps made
* with info from their /proc.
*/
fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
sizeof(*info->prstatus), info->prstatus);
fill_psinfo(info->psinfo, current->group_leader, current->mm);
fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
sizeof(*info->psinfo), info->psinfo);
info->numnote = 2;
fill_auxv_note(&info->notes[info->numnote++], current->mm);
/* Try to dump the FPU. */
info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
info->fpu);
if (info->prstatus->pr_fpvalid)
fill_note(info->notes + info->numnote++,
"CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(current, info->xfpu))
fill_note(info->notes + info->numnote++,
"LINUX", ELF_CORE_XFPREG_TYPE,
sizeof(*info->xfpu), info->xfpu);
#endif
return 1;
}
| DoS | 0 | static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
long signr, struct pt_regs *regs)
{
struct list_head *t;
if (!elf_note_info_init(info))
return 0;
if (signr) {
struct core_thread *ct;
struct elf_thread_status *ets;
for (ct = current->mm->core_state->dumper.next;
ct; ct = ct->next) {
ets = kzalloc(sizeof(*ets), GFP_KERNEL);
if (!ets)
return 0;
ets->thread = ct->task;
list_add(&ets->list, &info->thread_list);
}
list_for_each(t, &info->thread_list) {
int sz;
ets = list_entry(t, struct elf_thread_status, list);
sz = elf_dump_thread_status(signr, ets);
info->thread_status_size += sz;
}
}
/* now collect the dump for the current */
memset(info->prstatus, 0, sizeof(*info->prstatus));
fill_prstatus(info->prstatus, current, signr);
elf_core_copy_regs(&info->prstatus->pr_reg, regs);
/* Set up header */
fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS, ELF_OSABI);
/*
* Set up the notes in similar form to SVR4 core dumps made
* with info from their /proc.
*/
fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
sizeof(*info->prstatus), info->prstatus);
fill_psinfo(info->psinfo, current->group_leader, current->mm);
fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
sizeof(*info->psinfo), info->psinfo);
info->numnote = 2;
fill_auxv_note(&info->notes[info->numnote++], current->mm);
/* Try to dump the FPU. */
info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
info->fpu);
if (info->prstatus->pr_fpvalid)
fill_note(info->notes + info->numnote++,
"CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(current, info->xfpu))
fill_note(info->notes + info->numnote++,
"LINUX", ELF_CORE_XFPREG_TYPE,
sizeof(*info->xfpu), info->xfpu);
#endif
return 1;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,145 | static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
struct mm_struct *mm)
{
const struct cred *cred;
unsigned int i, len;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
len = mm->arg_end - mm->arg_start;
if (len >= ELF_PRARGSZ)
len = ELF_PRARGSZ-1;
if (copy_from_user(&psinfo->pr_psargs,
(const char __user *)mm->arg_start, len))
return -EFAULT;
for (i = 0; i < len; i++)
if (psinfo->pr_psargs[i] == 0)
psinfo->pr_psargs[i] = ' ';
psinfo->pr_psargs[len] = 0;
rcu_read_lock();
psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
rcu_read_unlock();
psinfo->pr_pid = task_pid_vnr(p);
psinfo->pr_pgrp = task_pgrp_vnr(p);
psinfo->pr_sid = task_session_vnr(p);
i = p->state ? ffz(~p->state) + 1 : 0;
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
psinfo->pr_nice = task_nice(p);
psinfo->pr_flag = p->flags;
rcu_read_lock();
cred = __task_cred(p);
SET_UID(psinfo->pr_uid, cred->uid);
SET_GID(psinfo->pr_gid, cred->gid);
rcu_read_unlock();
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
return 0;
}
| DoS | 0 | static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
struct mm_struct *mm)
{
const struct cred *cred;
unsigned int i, len;
/* first copy the parameters from user space */
memset(psinfo, 0, sizeof(struct elf_prpsinfo));
len = mm->arg_end - mm->arg_start;
if (len >= ELF_PRARGSZ)
len = ELF_PRARGSZ-1;
if (copy_from_user(&psinfo->pr_psargs,
(const char __user *)mm->arg_start, len))
return -EFAULT;
for (i = 0; i < len; i++)
if (psinfo->pr_psargs[i] == 0)
psinfo->pr_psargs[i] = ' ';
psinfo->pr_psargs[len] = 0;
rcu_read_lock();
psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
rcu_read_unlock();
psinfo->pr_pid = task_pid_vnr(p);
psinfo->pr_pgrp = task_pgrp_vnr(p);
psinfo->pr_sid = task_session_vnr(p);
i = p->state ? ffz(~p->state) + 1 : 0;
psinfo->pr_state = i;
psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
psinfo->pr_zomb = psinfo->pr_sname == 'Z';
psinfo->pr_nice = task_nice(p);
psinfo->pr_flag = p->flags;
rcu_read_lock();
cred = __task_cred(p);
SET_UID(psinfo->pr_uid, cred->uid);
SET_GID(psinfo->pr_gid, cred->gid);
rcu_read_unlock();
strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
return 0;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
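fill_psinfo() turns the task state bitmask into a single /proc-style letter: ffz(~state) is the index of the lowest set bit, and that index plus one selects from "RSDTZW", with zero meaning running. A sketch of the mapping, where __builtin_ctzl stands in for ffz(~x); the exact state-bit assignments vary by kernel version, so only the stable low bits are exercised:

#include <stdio.h>

static char state_letter(unsigned long state)
{
	/* i = p->state ? ffz(~p->state) + 1 : 0 in the source above. */
	unsigned int i = state ? (unsigned int)__builtin_ctzl(state) + 1 : 0;

	return (i > 5) ? '.' : "RSDTZW"[i];
}

int main(void)
{
	printf("%c %c %c\n",
	       state_letter(0),		/* R: TASK_RUNNING */
	       state_letter(1),		/* S: TASK_INTERRUPTIBLE */
	       state_letter(2));	/* D: TASK_UNINTERRUPTIBLE */
	return 0;
}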
17,146 | static struct vm_area_struct *first_vma(struct task_struct *tsk,
struct vm_area_struct *gate_vma)
{
struct vm_area_struct *ret = tsk->mm->mmap;
if (ret)
return ret;
return gate_vma;
}
| DoS | 0 | static struct vm_area_struct *first_vma(struct task_struct *tsk,
struct vm_area_struct *gate_vma)
{
struct vm_area_struct *ret = tsk->mm->mmap;
if (ret)
return ret;
return gate_vma;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,147 | static void free_note_info(struct elf_note_info *info)
{
struct elf_thread_core_info *threads = info->thread;
while (threads) {
unsigned int i;
struct elf_thread_core_info *t = threads;
threads = t->next;
WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
for (i = 1; i < info->thread_notes; ++i)
kfree(t->notes[i].data);
kfree(t);
}
kfree(info->psinfo.data);
}
| DoS | 0 | static void free_note_info(struct elf_note_info *info)
{
struct elf_thread_core_info *threads = info->thread;
while (threads) {
unsigned int i;
struct elf_thread_core_info *t = threads;
threads = t->next;
WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
for (i = 1; i < info->thread_notes; ++i)
kfree(t->notes[i].data);
kfree(t);
}
kfree(info->psinfo.data);
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,148 | static void free_note_info(struct elf_note_info *info)
{
while (!list_empty(&info->thread_list)) {
struct list_head *tmp = info->thread_list.next;
list_del(tmp);
kfree(list_entry(tmp, struct elf_thread_status, list));
}
kfree(info->prstatus);
kfree(info->psinfo);
kfree(info->notes);
kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
kfree(info->xfpu);
#endif
}
| DoS | 0 | static void free_note_info(struct elf_note_info *info)
{
while (!list_empty(&info->thread_list)) {
struct list_head *tmp = info->thread_list.next;
list_del(tmp);
kfree(list_entry(tmp, struct elf_thread_status, list));
}
kfree(info->prstatus);
kfree(info->psinfo);
kfree(info->notes);
kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
kfree(info->xfpu);
#endif
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,149 | static size_t get_note_info_size(struct elf_note_info *info)
{
int sz = 0;
int i;
for (i = 0; i < info->numnote; i++)
sz += notesize(info->notes + i);
sz += info->thread_status_size;
return sz;
}
| DoS | 0 | static size_t get_note_info_size(struct elf_note_info *info)
{
int sz = 0;
int i;
for (i = 0; i < info->numnote; i++)
sz += notesize(info->notes + i);
sz += info->thread_status_size;
return sz;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,150 | static int __init init_elf_binfmt(void)
{
return register_binfmt(&elf_format);
}
| DoS | 0 | static int __init init_elf_binfmt(void)
{
return register_binfmt(&elf_format);
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,151 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
struct file *interpreter = NULL; /* to shut gcc up */
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
unsigned long error;
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
unsigned int size;
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
retval = -ENOMEM;
goto out_ret;
}
/* Get the exec-header */
loc->elf_ex = *((struct elfhdr *)bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out;
if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
goto out;
if (!elf_check_arch(&loc->elf_ex))
goto out;
if (!bprm->file->f_op || !bprm->file->f_op->mmap)
goto out;
/* Now read in all of the header information */
if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
goto out;
if (loc->elf_ex.e_phnum < 1 ||
loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
goto out;
size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
retval = -ENOMEM;
elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
(char *)elf_phdata, size);
if (retval != size) {
if (retval >= 0)
retval = -EIO;
goto out_free_ph;
}
elf_ppnt = elf_phdata;
elf_bss = 0;
elf_brk = 0;
start_code = ~0UL;
end_code = 0;
start_data = 0;
end_data = 0;
for (i = 0; i < loc->elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
*/
retval = -ENOEXEC;
if (elf_ppnt->p_filesz > PATH_MAX ||
elf_ppnt->p_filesz < 2)
goto out_free_ph;
retval = -ENOMEM;
elf_interpreter = kmalloc(elf_ppnt->p_filesz,
GFP_KERNEL);
if (!elf_interpreter)
goto out_free_ph;
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
elf_interpreter,
elf_ppnt->p_filesz);
if (retval != elf_ppnt->p_filesz) {
if (retval >= 0)
retval = -EIO;
goto out_free_interp;
}
/* make sure path is NULL terminated */
retval = -ENOEXEC;
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
/*
* If the binary is not readable then enforce
* mm->dumpable = 0 regardless of the interpreter's
* permissions.
*/
would_dump(bprm, interpreter);
retval = kernel_read(interpreter, 0, bprm->buf,
BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
if (retval >= 0)
retval = -EIO;
goto out_free_dentry;
}
/* Get the exec headers */
loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
break;
}
elf_ppnt++;
}
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
if (elf_ppnt->p_type == PT_GNU_STACK) {
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
else
executable_stack = EXSTACK_DISABLE_X;
break;
}
/* Some simple consistency checks for the interpreter */
if (elf_interpreter) {
retval = -ELIBBAD;
/* Not an ELF interpreter */
if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out_free_dentry;
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
}
/* Flush all traces of the currently running executable */
retval = flush_old_exec(bprm);
if (retval)
goto out_free_dentry;
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(loc->elf_ex);
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
current->mm->free_area_cache = current->mm->mmap_base;
current->mm->cached_hole_size = 0;
retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
current->mm->start_stack = bprm->p;
/* Now we do a little grungy work by mmapping the ELF image into
the correct location in memory. */
for(i = 0, elf_ppnt = elf_phdata;
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
if (elf_ppnt->p_type != PT_LOAD)
continue;
if (unlikely (elf_brk > elf_bss)) {
unsigned long nbyte;
/* There was a PT_LOAD segment with p_memsz > p_filesz
before this one. Map anonymous pages, if needed,
and clear the area. */
retval = set_brk(elf_bss + load_bias,
elf_brk + load_bias);
if (retval) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
if (nbyte > elf_brk - elf_bss)
nbyte = elf_brk - elf_bss;
if (clear_user((void __user *)elf_bss +
load_bias, nbyte)) {
/*
* This bss-zeroing can fail if the ELF
* file specifies odd protections. So
* we don't check the return value
*/
}
}
}
if (elf_ppnt->p_flags & PF_R)
elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W)
elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X)
elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
/* Memory randomization might have been switched off
* in runtime via sysctl.
* If that is the case, retain the original non-zero
* load_bias value in order to establish proper
* non-randomized mappings.
*/
if (current->flags & PF_RANDOMIZE)
load_bias = 0;
else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
elf_prot, elf_flags, 0);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
retval = IS_ERR((void *)error) ?
PTR_ERR((void*)error) : -EINVAL;
goto out_free_dentry;
}
if (!load_addr_set) {
load_addr_set = 1;
load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
if (loc->elf_ex.e_type == ET_DYN) {
load_bias += error -
ELF_PAGESTART(load_bias + vaddr);
load_addr += load_bias;
reloc_func_desc = load_bias;
}
}
k = elf_ppnt->p_vaddr;
if (k < start_code)
start_code = k;
if (start_data < k)
start_data = k;
/*
* Check to see if the section's size will overflow the
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
goto out_free_dentry;
}
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if (k > elf_bss)
elf_bss = k;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
end_code = k;
if (end_data < k)
end_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
if (k > elf_brk)
elf_brk = k;
}
loc->elf_ex.e_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
/* Calling set_brk effectively mmaps the pages that we need
* for the bss and break sections. We must do this before
* mapping in the interpreter, to make sure it doesn't wind
* up getting placed where the bss needs to go.
*/
retval = set_brk(elf_bss, elf_brk);
if (retval) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
send_sig(SIGSEGV, current, 0);
retval = -EFAULT; /* Nobody gets to see this, but.. */
goto out_free_dentry;
}
if (elf_interpreter) {
unsigned long uninitialized_var(interp_map_addr);
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
&interp_map_addr,
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
* load_elf_interp() returns relocation
* adjustment
*/
interp_load_addr = elf_entry;
elf_entry += loc->interp_elf_ex.e_entry;
}
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = IS_ERR((void *)elf_entry) ?
(int)elf_entry : -EINVAL;
goto out_free_dentry;
}
reloc_func_desc = interp_load_addr;
allow_write_access(interpreter);
fput(interpreter);
kfree(elf_interpreter);
} else {
elf_entry = loc->elf_ex.e_entry;
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = -EINVAL;
goto out_free_dentry;
}
}
kfree(elf_phdata);
set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out;
}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
retval = create_elf_tables(bprm, &loc->elf_ex,
load_addr, interp_load_addr);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out;
}
/* N.B. passed_fileno might not be initialized? */
current->mm->end_code = end_code;
current->mm->start_code = start_code;
current->mm->start_data = start_data;
current->mm->end_data = end_data;
current->mm->start_stack = bprm->p;
#ifdef arch_randomize_brk
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
#ifdef CONFIG_COMPAT_BRK
current->brk_randomized = 1;
#endif
}
#endif
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
}
#ifdef ELF_PLAT_INIT
/*
* The ABI may specify that certain registers be set up in special
* ways (on i386 %edx is the address of a DT_FINI function, for
	 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
* that the e_entry field is the address of the function descriptor
* for the startup routine, rather than the address of the startup
* routine itself. This macro performs whatever initialization to
* the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
*/
ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
start_thread(regs, elf_entry, bprm->p);
retval = 0;
out:
kfree(loc);
out_ret:
return retval;
/* error cleanup */
out_free_dentry:
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_interp:
kfree(elf_interpreter);
out_free_ph:
kfree(elf_phdata);
goto out;
}
| DoS | 0 | static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
struct file *interpreter = NULL; /* to shut gcc up */
unsigned long load_addr = 0, load_bias = 0;
int load_addr_set = 0;
char * elf_interpreter = NULL;
unsigned long error;
struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int retval, i;
unsigned int size;
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
unsigned long reloc_func_desc __maybe_unused = 0;
int executable_stack = EXSTACK_DEFAULT;
unsigned long def_flags = 0;
struct {
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
retval = -ENOMEM;
goto out_ret;
}
/* Get the exec-header */
loc->elf_ex = *((struct elfhdr *)bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out;
if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
goto out;
if (!elf_check_arch(&loc->elf_ex))
goto out;
if (!bprm->file->f_op || !bprm->file->f_op->mmap)
goto out;
/* Now read in all of the header information */
if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
goto out;
if (loc->elf_ex.e_phnum < 1 ||
loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
goto out;
size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
retval = -ENOMEM;
elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
(char *)elf_phdata, size);
if (retval != size) {
if (retval >= 0)
retval = -EIO;
goto out_free_ph;
}
elf_ppnt = elf_phdata;
elf_bss = 0;
elf_brk = 0;
start_code = ~0UL;
end_code = 0;
start_data = 0;
end_data = 0;
for (i = 0; i < loc->elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary
*/
retval = -ENOEXEC;
if (elf_ppnt->p_filesz > PATH_MAX ||
elf_ppnt->p_filesz < 2)
goto out_free_ph;
retval = -ENOMEM;
elf_interpreter = kmalloc(elf_ppnt->p_filesz,
GFP_KERNEL);
if (!elf_interpreter)
goto out_free_ph;
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
elf_interpreter,
elf_ppnt->p_filesz);
if (retval != elf_ppnt->p_filesz) {
if (retval >= 0)
retval = -EIO;
goto out_free_interp;
}
/* make sure path is NULL terminated */
retval = -ENOEXEC;
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
/*
* If the binary is not readable then enforce
* mm->dumpable = 0 regardless of the interpreter's
* permissions.
*/
would_dump(bprm, interpreter);
retval = kernel_read(interpreter, 0, bprm->buf,
BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
if (retval >= 0)
retval = -EIO;
goto out_free_dentry;
}
/* Get the exec headers */
loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
break;
}
elf_ppnt++;
}
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
if (elf_ppnt->p_type == PT_GNU_STACK) {
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
else
executable_stack = EXSTACK_DISABLE_X;
break;
}
/* Some simple consistency checks for the interpreter */
if (elf_interpreter) {
retval = -ELIBBAD;
/* Not an ELF interpreter */
if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out_free_dentry;
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
}
/* Flush all traces of the currently running executable */
retval = flush_old_exec(bprm);
if (retval)
goto out_free_dentry;
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->mm->def_flags = def_flags;
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
SET_PERSONALITY(loc->elf_ex);
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
current->mm->free_area_cache = current->mm->mmap_base;
current->mm->cached_hole_size = 0;
retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
executable_stack);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
current->mm->start_stack = bprm->p;
/* Now we do a little grungy work by mmapping the ELF image into
the correct location in memory. */
for(i = 0, elf_ppnt = elf_phdata;
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
if (elf_ppnt->p_type != PT_LOAD)
continue;
if (unlikely (elf_brk > elf_bss)) {
unsigned long nbyte;
/* There was a PT_LOAD segment with p_memsz > p_filesz
before this one. Map anonymous pages, if needed,
and clear the area. */
retval = set_brk(elf_bss + load_bias,
elf_brk + load_bias);
if (retval) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
if (nbyte > elf_brk - elf_bss)
nbyte = elf_brk - elf_bss;
if (clear_user((void __user *)elf_bss +
load_bias, nbyte)) {
/*
* This bss-zeroing can fail if the ELF
* file specifies odd protections. So
* we don't check the return value
*/
}
}
}
if (elf_ppnt->p_flags & PF_R)
elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W)
elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X)
elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the
* default mmap base, as well as whatever program they
* might try to exec. This is because the brk will
* follow the loader, and is not movable. */
#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
/* Memory randomization might have been switched off
* in runtime via sysctl.
* If that is the case, retain the original non-zero
* load_bias value in order to establish proper
* non-randomized mappings.
*/
if (current->flags & PF_RANDOMIZE)
load_bias = 0;
else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
}
error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
elf_prot, elf_flags, 0);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
retval = IS_ERR((void *)error) ?
PTR_ERR((void*)error) : -EINVAL;
goto out_free_dentry;
}
if (!load_addr_set) {
load_addr_set = 1;
load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
if (loc->elf_ex.e_type == ET_DYN) {
load_bias += error -
ELF_PAGESTART(load_bias + vaddr);
load_addr += load_bias;
reloc_func_desc = load_bias;
}
}
k = elf_ppnt->p_vaddr;
if (k < start_code)
start_code = k;
if (start_data < k)
start_data = k;
/*
* Check to see if the section's size will overflow the
* allowed task size. Note that p_filesz must always be
* <= p_memsz so it is only necessary to check p_memsz.
*/
if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
/* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
retval = -EINVAL;
goto out_free_dentry;
}
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if (k > elf_bss)
elf_bss = k;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
end_code = k;
if (end_data < k)
end_data = k;
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
if (k > elf_brk)
elf_brk = k;
}
loc->elf_ex.e_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
/* Calling set_brk effectively mmaps the pages that we need
* for the bss and break sections. We must do this before
* mapping in the interpreter, to make sure it doesn't wind
* up getting placed where the bss needs to go.
*/
retval = set_brk(elf_bss, elf_brk);
if (retval) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
send_sig(SIGSEGV, current, 0);
retval = -EFAULT; /* Nobody gets to see this, but.. */
goto out_free_dentry;
}
if (elf_interpreter) {
unsigned long uninitialized_var(interp_map_addr);
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
&interp_map_addr,
load_bias);
if (!IS_ERR((void *)elf_entry)) {
/*
* load_elf_interp() returns relocation
* adjustment
*/
interp_load_addr = elf_entry;
elf_entry += loc->interp_elf_ex.e_entry;
}
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = IS_ERR((void *)elf_entry) ?
(int)elf_entry : -EINVAL;
goto out_free_dentry;
}
reloc_func_desc = interp_load_addr;
allow_write_access(interpreter);
fput(interpreter);
kfree(elf_interpreter);
} else {
elf_entry = loc->elf_ex.e_entry;
if (BAD_ADDR(elf_entry)) {
force_sig(SIGSEGV, current);
retval = -EINVAL;
goto out_free_dentry;
}
}
kfree(elf_phdata);
set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out;
}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
install_exec_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
retval = create_elf_tables(bprm, &loc->elf_ex,
load_addr, interp_load_addr);
if (retval < 0) {
send_sig(SIGKILL, current, 0);
goto out;
}
/* N.B. passed_fileno might not be initialized? */
current->mm->end_code = end_code;
current->mm->start_code = start_code;
current->mm->start_data = start_data;
current->mm->end_data = end_data;
current->mm->start_stack = bprm->p;
#ifdef arch_randomize_brk
if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
#ifdef CONFIG_COMPAT_BRK
current->brk_randomized = 1;
#endif
}
#endif
if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
}
#ifdef ELF_PLAT_INIT
/*
* The ABI may specify that certain registers be set up in special
* ways (on i386 %edx is the address of a DT_FINI function, for
	 * example). In addition, it may also specify (e.g., PowerPC64 ELF)
* that the e_entry field is the address of the function descriptor
* for the startup routine, rather than the address of the startup
* routine itself. This macro performs whatever initialization to
* the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
*/
ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
start_thread(regs, elf_entry, bprm->p);
retval = 0;
out:
kfree(loc);
out_ret:
return retval;
/* error cleanup */
out_free_dentry:
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
out_free_interp:
kfree(elf_interpreter);
out_free_ph:
kfree(elf_phdata);
goto out;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
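For ET_DYN binaries, load_elf_binary() computes a load_bias before the first PT_LOAD mapping and then folds in wherever mmap actually placed the segment, so that load_bias + p_vaddr always names the mapped address. A worked example of that arithmetic; ELF_ET_DYN_BASE and ELF_MIN_ALIGN below are illustrative assumptions for the example, not values taken from any particular architecture:

#include <stdio.h>

#define ELF_MIN_ALIGN    0x1000UL
#define ELF_PAGESTART(a) ((a) & ~(ELF_MIN_ALIGN - 1))
#define ELF_ET_DYN_BASE  0x555555554000UL	/* hypothetical base for the example */

int main(void)
{
	unsigned long vaddr  = 0x1040;		/* first PT_LOAD p_vaddr */
	unsigned long bias   = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
	unsigned long hint   = ELF_PAGESTART(bias + vaddr);
	unsigned long mapped = hint + 0x200000;	/* pretend mmap slid the mapping */

	/* the fixup in load_elf_binary(): bias absorbs the slide */
	bias += mapped - ELF_PAGESTART(bias + vaddr);
	printf("final bias %#lx; the entry point runs at bias + e_entry\n", bias);
	return 0;
}

When mmap honors the hint exactly, mapped == hint and the adjustment is zero, so the bias stays at its initial page-aligned value.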
17,152 | static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
error = -ENOEXEC;
retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
if (retval != sizeof(elf_ex))
goto out;
if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out;
/* First of all, some simple consistency checks */
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
!elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
goto out;
/* Now read in all of the header information */
j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
error = -ENOMEM;
elf_phdata = kmalloc(j, GFP_KERNEL);
if (!elf_phdata)
goto out;
eppnt = elf_phdata;
error = -ENOEXEC;
retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
if (retval != j)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
if ((eppnt + i)->p_type == PT_LOAD)
j++;
if (j != 1)
goto out_free_ph;
while (eppnt->p_type != PT_LOAD)
eppnt++;
/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
error = do_mmap(file,
ELF_PAGESTART(eppnt->p_vaddr),
(eppnt->p_filesz +
ELF_PAGEOFFSET(eppnt->p_vaddr)),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
(eppnt->p_offset -
ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
if (error != ELF_PAGESTART(eppnt->p_vaddr))
goto out_free_ph;
elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
if (padzero(elf_bss)) {
error = -EFAULT;
goto out_free_ph;
}
len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
ELF_MIN_ALIGN - 1);
bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len) {
		down_write(&current->mm->mmap_sem);
do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
}
error = 0;
out_free_ph:
kfree(elf_phdata);
out:
return error;
}
| DoS | 0 | static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long elf_bss, bss, len;
int retval, error, i, j;
struct elfhdr elf_ex;
error = -ENOEXEC;
retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
if (retval != sizeof(elf_ex))
goto out;
if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
goto out;
/* First of all, some simple consistency checks */
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
!elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
goto out;
/* Now read in all of the header information */
j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */
error = -ENOMEM;
elf_phdata = kmalloc(j, GFP_KERNEL);
if (!elf_phdata)
goto out;
eppnt = elf_phdata;
error = -ENOEXEC;
retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
if (retval != j)
goto out_free_ph;
for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
if ((eppnt + i)->p_type == PT_LOAD)
j++;
if (j != 1)
goto out_free_ph;
while (eppnt->p_type != PT_LOAD)
eppnt++;
/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
error = do_mmap(file,
ELF_PAGESTART(eppnt->p_vaddr),
(eppnt->p_filesz +
ELF_PAGEOFFSET(eppnt->p_vaddr)),
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
(eppnt->p_offset -
ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
if (error != ELF_PAGESTART(eppnt->p_vaddr))
goto out_free_ph;
elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
if (padzero(elf_bss)) {
error = -EFAULT;
goto out_free_ph;
}
len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
ELF_MIN_ALIGN - 1);
bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len) {
		down_write(&current->mm->mmap_sem);
do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
}
error = 0;
out_free_ph:
kfree(elf_phdata);
out:
return error;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,153 | static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
struct vm_area_struct *gate_vma)
{
struct vm_area_struct *ret;
ret = this_vma->vm_next;
if (ret)
return ret;
if (this_vma == gate_vma)
return NULL;
return gate_vma;
}
| DoS | 0 | static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
struct vm_area_struct *gate_vma)
{
struct vm_area_struct *ret;
ret = this_vma->vm_next;
if (ret)
return ret;
if (this_vma == gate_vma)
return NULL;
return gate_vma;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
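next_vma() splices one synthetic element, the gate vma (for example the vsyscall mapping), onto the end of the ordinary vma list so that the core dumper visits it exactly once; if memory serves, the companion first_vma() helper starts the walk at the list head, or at the gate vma when the list is empty. A runnable toy model of the iteration shape, with a stand-in struct:

#include <stdio.h>
#include <stddef.h>

struct vm_area_struct { struct vm_area_struct *vm_next; };

static struct vm_area_struct *next_vma_(struct vm_area_struct *cur,
					struct vm_area_struct *gate)
{
	if (cur->vm_next)
		return cur->vm_next;
	return cur == gate ? NULL : gate;	/* append the gate vma once */
}

int main(void)
{
	struct vm_area_struct gate = { NULL };
	struct vm_area_struct b = { NULL }, a = { &b };
	int n = 0;

	for (struct vm_area_struct *v = &a; v; v = next_vma_(v, &gate))
		n++;
	printf("visited %d vmas (2 real + 1 gate)\n", n);	/* prints 3 */
	return 0;
}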
17,154 | static int notesize(struct memelfnote *en)
{
int sz;
sz = sizeof(struct elf_note);
sz += roundup(strlen(en->name) + 1, 4);
sz += roundup(en->datasz, 4);
return sz;
}
| DoS | 0 | static int notesize(struct memelfnote *en)
{
int sz;
sz = sizeof(struct elf_note);
sz += roundup(strlen(en->name) + 1, 4);
sz += roundup(en->datasz, 4);
return sz;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
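notesize() computes the on-disk size of one ELF note record: the fixed 12-byte note header (three 32-bit words, identical for 32- and 64-bit ELF) plus the name and descriptor, each padded to a 4-byte boundary. Worked numbers for a "CORE" note with an illustrative 336-byte descriptor:

#include <stdio.h>
#include <string.h>

#define ROUNDUP4(x) (((x) + 3u) & ~3u)

int main(void)
{
	unsigned hdr  = 12;				/* sizeof(struct elf_note) */
	unsigned name = ROUNDUP4(strlen("CORE") + 1);	/* 5 -> 8 */
	unsigned desc = ROUNDUP4(336);			/* illustrative datasz */
	printf("note record: %u bytes\n", hdr + name + desc);	/* 356 */
	return 0;
}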
17,155 | static int padzero(unsigned long elf_bss)
{
unsigned long nbyte;
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
if (clear_user((void __user *) elf_bss, nbyte))
return -EFAULT;
}
return 0;
}
| DoS | 0 | static int padzero(unsigned long elf_bss)
{
unsigned long nbyte;
nbyte = ELF_PAGEOFFSET(elf_bss);
if (nbyte) {
nbyte = ELF_MIN_ALIGN - nbyte;
if (clear_user((void __user *) elf_bss, nbyte))
return -EFAULT;
}
return 0;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
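padzero() clears the tail of the last file-backed page, from elf_bss up to the next ELF_MIN_ALIGN boundary, because mapping the final partial page of the file leaves stale file bytes where zero-filled bss is expected. Worked numbers, assuming ELF_MIN_ALIGN == 4096:

#include <stdio.h>

#define ELF_MIN_ALIGN     4096UL
#define ELF_PAGEOFFSET(a) ((a) & (ELF_MIN_ALIGN - 1))

int main(void)
{
	unsigned long elf_bss = 0x404a30;	/* example end of initialized data */
	unsigned long off = ELF_PAGEOFFSET(elf_bss);
	if (off)
		printf("zero %lu bytes up to %#lx\n",
		       ELF_MIN_ALIGN - off,
		       (elf_bss + ELF_MIN_ALIGN) & ~(ELF_MIN_ALIGN - 1));
	/* prints: zero 1488 bytes up to 0x405000 */
	return 0;
}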
17,156 | static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN(stack_top) + random_variable;
#else
return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
| DoS | 0 | static unsigned long randomize_stack_top(unsigned long stack_top)
{
unsigned int random_variable = 0;
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
random_variable = get_random_int() & STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
#ifdef CONFIG_STACK_GROWSUP
return PAGE_ALIGN(stack_top) + random_variable;
#else
return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
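Note that random_variable in randomize_stack_top() is an unsigned int, so the left shift by PAGE_SHIFT is performed in 32 bits; with a wide STACK_RND_MASK on 64-bit, the top bits are silently discarded, weakening stack ASLR. Later kernels widened the variable to unsigned long (to the best of my recollection this weakness was tracked as CVE-2015-1593). A small demonstration of the truncation, assuming PAGE_SHIFT == 12 and x86-64's 22-bit mask:

#include <stdio.h>

#define PAGE_SHIFT     12
#define STACK_RND_MASK 0x3fffffUL	/* 22 bits of page-granular randomness */

int main(void)
{
	unsigned int  narrow = 0x300000u;	/* top two mask bits set */
	unsigned long wide   = 0x300000UL;

	printf("32-bit shift: %#x\n", narrow << PAGE_SHIFT);	/* 0: bits lost */
	printf("64-bit shift: %#lx\n", wide << PAGE_SHIFT);	/* 0x300000000 */
	return 0;
}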
17,157 | static int set_brk(unsigned long start, unsigned long end)
{
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
unsigned long addr;
		down_write(&current->mm->mmap_sem);
addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
if (BAD_ADDR(addr))
return addr;
}
current->mm->start_brk = current->mm->brk = end;
return 0;
}
| DoS | 0 | static int set_brk(unsigned long start, unsigned long end)
{
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
unsigned long addr;
		down_write(&current->mm->mmap_sem);
addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
if (BAD_ADDR(addr))
return addr;
}
current->mm->start_brk = current->mm->brk = end;
return 0;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,158 | static unsigned long vma_dump_size(struct vm_area_struct *vma,
unsigned long mm_flags)
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
/* The vma can be set up to tell us the answer directly. */
if (vma->vm_flags & VM_ALWAYSDUMP)
goto whole;
/* Hugetlb memory check */
if (vma->vm_flags & VM_HUGETLB) {
if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
goto whole;
if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
goto whole;
}
/* Do not dump I/O mapped devices or special mappings */
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
/* By default, dump shared memory if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED) {
if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
goto whole;
return 0;
}
/* Dump segments that have been written to. */
if (vma->anon_vma && FILTER(ANON_PRIVATE))
goto whole;
if (vma->vm_file == NULL)
return 0;
if (FILTER(MAPPED_PRIVATE))
goto whole;
/*
* If this looks like the beginning of a DSO or executable mapping,
* check for an ELF header. If we find one, dump the first page to
* aid in determining what was mapped here.
*/
if (FILTER(ELF_HEADERS) &&
vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
u32 __user *header = (u32 __user *) vma->vm_start;
u32 word;
mm_segment_t fs = get_fs();
/*
* Doing it this way gets the constant folded by GCC.
*/
union {
u32 cmp;
char elfmag[SELFMAG];
} magic;
BUILD_BUG_ON(SELFMAG != sizeof word);
magic.elfmag[EI_MAG0] = ELFMAG0;
magic.elfmag[EI_MAG1] = ELFMAG1;
magic.elfmag[EI_MAG2] = ELFMAG2;
magic.elfmag[EI_MAG3] = ELFMAG3;
/*
* Switch to the user "segment" for get_user(),
* then put back what elf_core_dump() had in place.
*/
set_fs(USER_DS);
if (unlikely(get_user(word, header)))
word = 0;
set_fs(fs);
if (word == magic.cmp)
return PAGE_SIZE;
}
#undef FILTER
return 0;
whole:
return vma->vm_end - vma->vm_start;
}
| DoS | 0 | static unsigned long vma_dump_size(struct vm_area_struct *vma,
unsigned long mm_flags)
{
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
/* The vma can be set up to tell us the answer directly. */
if (vma->vm_flags & VM_ALWAYSDUMP)
goto whole;
/* Hugetlb memory check */
if (vma->vm_flags & VM_HUGETLB) {
if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
goto whole;
if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
goto whole;
}
/* Do not dump I/O mapped devices or special mappings */
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
/* By default, dump shared memory if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED) {
if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
goto whole;
return 0;
}
/* Dump segments that have been written to. */
if (vma->anon_vma && FILTER(ANON_PRIVATE))
goto whole;
if (vma->vm_file == NULL)
return 0;
if (FILTER(MAPPED_PRIVATE))
goto whole;
/*
* If this looks like the beginning of a DSO or executable mapping,
* check for an ELF header. If we find one, dump the first page to
* aid in determining what was mapped here.
*/
if (FILTER(ELF_HEADERS) &&
vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
u32 __user *header = (u32 __user *) vma->vm_start;
u32 word;
mm_segment_t fs = get_fs();
/*
* Doing it this way gets the constant folded by GCC.
*/
union {
u32 cmp;
char elfmag[SELFMAG];
} magic;
BUILD_BUG_ON(SELFMAG != sizeof word);
magic.elfmag[EI_MAG0] = ELFMAG0;
magic.elfmag[EI_MAG1] = ELFMAG1;
magic.elfmag[EI_MAG2] = ELFMAG2;
magic.elfmag[EI_MAG3] = ELFMAG3;
/*
* Switch to the user "segment" for get_user(),
* then put back what elf_core_dump() had in place.
*/
set_fs(USER_DS);
if (unlikely(get_user(word, header)))
word = 0;
set_fs(fs);
if (word == magic.cmp)
return PAGE_SIZE;
}
#undef FILTER
return 0;
whole:
return vma->vm_end - vma->vm_start;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
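The FILTER() bits tested in vma_dump_size() correspond to the per-process core-dump filter that is, as far as I know, exposed at /proc/<pid>/coredump_filter. A sketch of the same bit test in isolation; the bit positions below are illustrative stand-ins, the authoritative values live in the kernel headers:

#include <stdio.h>

enum {					/* stand-in bit numbers for illustration */
	MMF_DUMP_ANON_PRIVATE = 2,
	MMF_DUMP_ANON_SHARED = 3,
	MMF_DUMP_MAPPED_PRIVATE = 4,
};

#define FILTER(flags, type) ((flags) & (1UL << MMF_DUMP_##type))

int main(void)
{
	unsigned long mm_flags = (1UL << MMF_DUMP_ANON_PRIVATE) |
				 (1UL << MMF_DUMP_ANON_SHARED);
	printf("dump anon private? %d\n", !!FILTER(mm_flags, ANON_PRIVATE));	/* 1 */
	printf("dump mapped private? %d\n", !!FILTER(mm_flags, MAPPED_PRIVATE));/* 0 */
	return 0;
}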
17,159 | static int write_note_info(struct elf_note_info *info,
struct file *file, loff_t *foffset)
{
bool first = 1;
struct elf_thread_core_info *t = info->thread;
do {
int i;
if (!writenote(&t->notes[0], file, foffset))
return 0;
if (first && !writenote(&info->psinfo, file, foffset))
return 0;
if (first && !writenote(&info->auxv, file, foffset))
return 0;
for (i = 1; i < info->thread_notes; ++i)
if (t->notes[i].data &&
!writenote(&t->notes[i], file, foffset))
return 0;
first = 0;
t = t->next;
} while (t);
return 1;
}
| DoS | 0 | static int write_note_info(struct elf_note_info *info,
struct file *file, loff_t *foffset)
{
bool first = 1;
struct elf_thread_core_info *t = info->thread;
do {
int i;
if (!writenote(&t->notes[0], file, foffset))
return 0;
if (first && !writenote(&info->psinfo, file, foffset))
return 0;
if (first && !writenote(&info->auxv, file, foffset))
return 0;
for (i = 1; i < info->thread_notes; ++i)
if (t->notes[i].data &&
!writenote(&t->notes[i], file, foffset))
return 0;
first = 0;
t = t->next;
} while (t);
return 1;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,160 | static int write_note_info(struct elf_note_info *info,
struct file *file, loff_t *foffset)
{
int i;
struct list_head *t;
for (i = 0; i < info->numnote; i++)
if (!writenote(info->notes + i, file, foffset))
return 0;
/* write out the thread status notes section */
list_for_each(t, &info->thread_list) {
struct elf_thread_status *tmp =
list_entry(t, struct elf_thread_status, list);
for (i = 0; i < tmp->num_notes; i++)
if (!writenote(&tmp->notes[i], file, foffset))
return 0;
}
return 1;
}
| DoS | 0 | static int write_note_info(struct elf_note_info *info,
struct file *file, loff_t *foffset)
{
int i;
struct list_head *t;
for (i = 0; i < info->numnote; i++)
if (!writenote(info->notes + i, file, foffset))
return 0;
/* write out the thread status notes section */
list_for_each(t, &info->thread_list) {
struct elf_thread_status *tmp =
list_entry(t, struct elf_thread_status, list);
for (i = 0; i < tmp->num_notes; i++)
if (!writenote(&tmp->notes[i], file, foffset))
return 0;
}
return 1;
}
| @@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
- if (regset->core_note_type &&
+ if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size; | null | null | null |
17,161 | build_path_from_dentry(struct dentry *direntry)
{
struct dentry *temp;
int namelen;
int dfsplen;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
unsigned seq;
dirsep = CIFS_DIR_SEP(cifs_sb);
if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
cifs_bp_rename_retry:
namelen = dfsplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
return NULL;
}
}
rcu_read_unlock();
full_path = kmalloc(namelen+1, GFP_KERNEL);
if (full_path == NULL)
return full_path;
full_path[namelen] = 0; /* trailing null */
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
spin_lock(&temp->d_lock);
namelen -= 1 + temp->d_name.len;
if (namelen < 0) {
spin_unlock(&temp->d_lock);
break;
} else {
full_path[namelen] = dirsep;
strncpy(full_path + namelen + 1, temp->d_name.name,
temp->d_name.len);
cFYI(0, "name: %s", full_path + namelen);
}
spin_unlock(&temp->d_lock);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
kfree(full_path);
return NULL;
}
}
rcu_read_unlock();
if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
cFYI(1, "did not end path lookup where expected. namelen=%d "
"dfsplen=%d", namelen, dfsplen);
/* presumably this is only possible if racing with a rename
of one of the parent directories (we can not lock the dentries
above us to prevent this, but retrying should be harmless) */
kfree(full_path);
goto cifs_bp_rename_retry;
}
/* DIR_SEP already set for byte 0 / vs \ but not for
subsequent slashes in prepath which currently must
be entered the right way - not sure if there is an alternative
since the '\' is a valid posix character so we can not switch
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
int i;
for (i = 0; i < dfsplen; i++) {
if (full_path[i] == '\\')
full_path[i] = '/';
}
}
}
return full_path;
}
| DoS | 0 | build_path_from_dentry(struct dentry *direntry)
{
struct dentry *temp;
int namelen;
int dfsplen;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
unsigned seq;
dirsep = CIFS_DIR_SEP(cifs_sb);
if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
cifs_bp_rename_retry:
namelen = dfsplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
return NULL;
}
}
rcu_read_unlock();
full_path = kmalloc(namelen+1, GFP_KERNEL);
if (full_path == NULL)
return full_path;
full_path[namelen] = 0; /* trailing null */
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
spin_lock(&temp->d_lock);
namelen -= 1 + temp->d_name.len;
if (namelen < 0) {
spin_unlock(&temp->d_lock);
break;
} else {
full_path[namelen] = dirsep;
strncpy(full_path + namelen + 1, temp->d_name.name,
temp->d_name.len);
cFYI(0, "name: %s", full_path + namelen);
}
spin_unlock(&temp->d_lock);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
kfree(full_path);
return NULL;
}
}
rcu_read_unlock();
if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
cFYI(1, "did not end path lookup where expected. namelen=%d "
"dfsplen=%d", namelen, dfsplen);
/* presumably this is only possible if racing with a rename
of one of the parent directories (we can not lock the dentries
above us to prevent this, but retrying should be harmless) */
kfree(full_path);
goto cifs_bp_rename_retry;
}
/* DIR_SEP already set for byte 0 / vs \ but not for
subsequent slashes in prepath which currently must
be entered the right way - not sure if there is an alternative
since the '\' is a valid posix character so we can not switch
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
int i;
for (i = 0; i < dfsplen; i++) {
if (full_path[i] == '\\')
full_path[i] = '/';
}
}
}
return full_path;
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
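The cifs_bp_rename_retry loop in build_path_from_dentry() is the classic seqlock-reader pattern: sample rename_lock with read_seqbegin(), assemble the path from the dentry chain, and if read_seqretry() reports that a rename ran concurrently, discard the buffer and start over. A simplified userspace stand-in for the pattern (real kernel readers also spin while the sequence count is odd):

#include <stdio.h>
#include <stdatomic.h>

static atomic_uint rename_seq;		/* writers increment twice around a rename */

static unsigned read_seqbegin_(void)  { return atomic_load(&rename_seq) & ~1u; }
static int read_seqretry_(unsigned s) { return atomic_load(&rename_seq) != s; }

int main(void)
{
	unsigned seq = read_seqbegin_();
	/* ...a writer renames a parent dentry meanwhile... */
	atomic_fetch_add(&rename_seq, 2);
	printf("retry needed: %d\n", read_seqretry_(seq));	/* 1: rebuild the path */
	return 0;
}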
17,162 | static int cifs_ci_compare(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls;
if ((name->len == len) &&
(nls_strnicmp(codepage, name->name, str, len) == 0))
return 0;
return 1;
}
| DoS | 0 | static int cifs_ci_compare(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls;
if ((name->len == len) &&
(nls_strnicmp(codepage, name->name, str, len) == 0))
return 0;
return 1;
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
17,163 | static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode,
struct qstr *q)
{
struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
unsigned long hash;
int i;
hash = init_name_hash();
for (i = 0; i < q->len; i++)
hash = partial_name_hash(nls_tolower(codepage, q->name[i]),
hash);
q->hash = end_name_hash(hash);
return 0;
}
| DoS | 0 | static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode,
struct qstr *q)
{
struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
unsigned long hash;
int i;
hash = init_name_hash();
for (i = 0; i < q->len; i++)
hash = partial_name_hash(nls_tolower(codepage, q->name[i]),
hash);
q->hash = end_name_hash(hash);
return 0;
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
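Folding every character through nls_tolower() before hashing makes the dentry hash case-insensitive, so "README.TXT" and "readme.txt" land in the same bucket and cifs_ci_compare() can then confirm the match with a case-insensitive string compare. A toy ASCII-only version of the idea (the real code folds case through the mount's NLS codepage and uses the kernel's name-hash primitives):

#include <stdio.h>
#include <ctype.h>
#include <string.h>

static unsigned long ci_hash(const char *s, size_t len)
{
	unsigned long hash = 0;
	for (size_t i = 0; i < len; i++)
		hash = (hash + tolower((unsigned char)s[i])) * 11;	/* toy mix */
	return hash;
}

int main(void)
{
	printf("%lu %lu\n", ci_hash("README.TXT", 10), ci_hash("readme.txt", 10));
	return 0;	/* both hashes are equal */
}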
17,164 | cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
{
if (nd && (nd->flags & LOOKUP_RCU))
return -ECHILD;
if (direntry->d_inode) {
if (cifs_revalidate_dentry(direntry))
return 0;
else {
/*
* Forcibly invalidate automounting directory inodes
* (remote DFS directories) so to have them
* instantiated again for automount
*/
if (IS_AUTOMOUNT(direntry->d_inode))
return 0;
return 1;
}
}
/*
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
if (!nd)
return 0;
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
return 0;
return 1;
}
| DoS | 0 | cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
{
if (nd && (nd->flags & LOOKUP_RCU))
return -ECHILD;
if (direntry->d_inode) {
if (cifs_revalidate_dentry(direntry))
return 0;
else {
/*
* Forcibly invalidate automounting directory inodes
* (remote DFS directories) so to have them
* instantiated again for automount
*/
if (IS_AUTOMOUNT(direntry->d_inode))
return 0;
return 1;
}
}
/*
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
if (!nd)
return 0;
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
return 0;
return 1;
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
17,165 | int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
dev_t device_number)
{
int rc = -EPERM;
int xid;
int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifs_io_parms io_parms;
char *full_path = NULL;
struct inode *newinode = NULL;
int oplock = 0;
u16 fileHandle;
FILE_ALL_INFO *buf = NULL;
unsigned int bytes_written;
struct win_dev *pdev;
if (!old_valid_dev(device_number))
return -EINVAL;
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto mknod_out;
}
if (pTcon->unix_ext) {
struct cifs_unix_set_info_args args = {
.mode = mode & ~current_umask(),
.ctime = NO_CHANGE_64,
.atime = NO_CHANGE_64,
.mtime = NO_CHANGE_64,
.device = device_number,
};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = (__u64) current_fsuid();
args.gid = (__u64) current_fsgid();
} else {
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc)
goto mknod_out;
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
if (rc == 0)
d_instantiate(direntry, newinode);
goto mknod_out;
}
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
cFYI(1, "sfu compat create special file");
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
kfree(full_path);
rc = -ENOMEM;
FreeXid(xid);
return rc;
}
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
GENERIC_WRITE, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc)
goto mknod_out;
/* BB Do not bother to decode buf since no local inode yet to put
* timestamps in, but we can reuse it safely */
pdev = (struct win_dev *)buf;
io_parms.netfid = fileHandle;
io_parms.pid = current->tgid;
io_parms.tcon = pTcon;
io_parms.offset = 0;
io_parms.length = sizeof(struct win_dev);
if (S_ISCHR(mode)) {
memcpy(pdev->type, "IntxCHR", 8);
pdev->major =
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
rc = CIFSSMBWrite(xid, &io_parms,
&bytes_written, (char *)pdev,
NULL, 0);
} else if (S_ISBLK(mode)) {
memcpy(pdev->type, "IntxBLK", 8);
pdev->major =
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
rc = CIFSSMBWrite(xid, &io_parms,
&bytes_written, (char *)pdev,
NULL, 0);
} /* else if (S_ISFIFO) */
CIFSSMBClose(xid, pTcon, fileHandle);
d_drop(direntry);
/* FIXME: add code here to set EAs */
mknod_out:
kfree(full_path);
kfree(buf);
FreeXid(xid);
cifs_put_tlink(tlink);
return rc;
}
| DoS | 0 | int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
dev_t device_number)
{
int rc = -EPERM;
int xid;
int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifs_io_parms io_parms;
char *full_path = NULL;
struct inode *newinode = NULL;
int oplock = 0;
u16 fileHandle;
FILE_ALL_INFO *buf = NULL;
unsigned int bytes_written;
struct win_dev *pdev;
if (!old_valid_dev(device_number))
return -EINVAL;
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto mknod_out;
}
if (pTcon->unix_ext) {
struct cifs_unix_set_info_args args = {
.mode = mode & ~current_umask(),
.ctime = NO_CHANGE_64,
.atime = NO_CHANGE_64,
.mtime = NO_CHANGE_64,
.device = device_number,
};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = (__u64) current_fsuid();
args.gid = (__u64) current_fsgid();
} else {
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc)
goto mknod_out;
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
if (rc == 0)
d_instantiate(direntry, newinode);
goto mknod_out;
}
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
cFYI(1, "sfu compat create special file");
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
kfree(full_path);
rc = -ENOMEM;
FreeXid(xid);
return rc;
}
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
GENERIC_WRITE, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc)
goto mknod_out;
/* BB Do not bother to decode buf since no local inode yet to put
* timestamps in, but we can reuse it safely */
pdev = (struct win_dev *)buf;
io_parms.netfid = fileHandle;
io_parms.pid = current->tgid;
io_parms.tcon = pTcon;
io_parms.offset = 0;
io_parms.length = sizeof(struct win_dev);
if (S_ISCHR(mode)) {
memcpy(pdev->type, "IntxCHR", 8);
pdev->major =
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
rc = CIFSSMBWrite(xid, &io_parms,
&bytes_written, (char *)pdev,
NULL, 0);
} else if (S_ISBLK(mode)) {
memcpy(pdev->type, "IntxBLK", 8);
pdev->major =
cpu_to_le64(MAJOR(device_number));
pdev->minor =
cpu_to_le64(MINOR(device_number));
rc = CIFSSMBWrite(xid, &io_parms,
&bytes_written, (char *)pdev,
NULL, 0);
} /* else if (S_ISFIFO) */
CIFSSMBClose(xid, pTcon, fileHandle);
d_drop(direntry);
/* FIXME: add code here to set EAs */
mknod_out:
kfree(full_path);
kfree(buf);
FreeXid(xid);
cifs_put_tlink(tlink);
return rc;
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
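Under the SFU ("Services for Unix") emulation used by cifs_mknod(), a device node is persisted on the server as a tiny regular file: an 8-byte tag ("IntxCHR" or "IntxBLK", NUL-padded) followed by 64-bit little-endian major and minor numbers, i.e. the struct win_dev record written above. A sketch of building that 24-byte record in userspace, assuming a little-endian host for brevity (the kernel converts with cpu_to_le64()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct win_dev {			/* mirrors the on-the-wire layout above */
	unsigned char type[8];		/* "IntxCHR" or "IntxBLK", NUL-padded */
	uint64_t major;			/* little-endian on the wire */
	uint64_t minor;
} __attribute__((packed));		/* GCC-style packing */

int main(void)
{
	struct win_dev d;
	memcpy(d.type, "IntxCHR", 8);
	d.major = 4;			/* e.g. a serial tty character device */
	d.minor = 64;
	printf("record is %zu bytes\n", sizeof d);	/* 24 */
	return 0;
}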
17,166 | renew_parental_timestamps(struct dentry *direntry)
{
/* BB check if there is a way to get the kernel to do this or if we
really need this */
do {
direntry->d_time = jiffies;
direntry = direntry->d_parent;
} while (!IS_ROOT(direntry));
}
| DoS | 0 | renew_parental_timestamps(struct dentry *direntry)
{
/* BB check if there is a way to get the kernel to do this or if we
really need this */
do {
direntry->d_time = jiffies;
direntry = direntry->d_parent;
} while (!IS_ROOT(direntry));
}
| @@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* If either that or op not supported returned, follow
* the normal lookup.
*/
- if ((rc == 0) || (rc == -ENOENT))
+ switch (rc) {
+ case 0:
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ if (newInode && !S_ISREG(newInode->i_mode)) {
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ break;
+ }
+ case -ENOENT:
posix_open = true;
- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+ case -EOPNOTSUPP:
+ break;
+ default:
pTcon->broken_posix_open = true;
+ }
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path, | CWE-264 | null | null |
17,167 | static kadm5_ret_t add_to_history(krb5_context context,
krb5_kvno hist_kvno,
osa_princ_ent_t adb,
kadm5_policy_ent_t pol,
osa_pw_hist_ent *pw)
{
osa_pw_hist_ent *histp;
uint32_t nhist;
unsigned int i, knext, nkeys;
nhist = pol->pw_history_num;
/* A history of 1 means just check the current password */
if (nhist <= 1)
return 0;
if (adb->admin_history_kvno != hist_kvno) {
/* The history key has changed since the last password change, so we
* have to reset the password history. */
free(adb->old_keys);
adb->old_keys = NULL;
adb->old_key_len = 0;
adb->old_key_next = 0;
adb->admin_history_kvno = hist_kvno;
}
nkeys = adb->old_key_len;
knext = adb->old_key_next;
/* resize the adb->old_keys array if necessary */
if (nkeys + 1 < nhist) {
if (adb->old_keys == NULL) {
adb->old_keys = (osa_pw_hist_ent *)
malloc((nkeys + 1) * sizeof (osa_pw_hist_ent));
} else {
adb->old_keys = (osa_pw_hist_ent *)
realloc(adb->old_keys,
(nkeys + 1) * sizeof (osa_pw_hist_ent));
}
if (adb->old_keys == NULL)
return(ENOMEM);
memset(&adb->old_keys[nkeys], 0, sizeof(osa_pw_hist_ent));
nkeys = ++adb->old_key_len;
/*
* To avoid losing old keys, shift forward each entry after
* knext.
*/
for (i = nkeys - 1; i > knext; i--) {
adb->old_keys[i] = adb->old_keys[i - 1];
}
memset(&adb->old_keys[knext], 0, sizeof(osa_pw_hist_ent));
} else if (nkeys + 1 > nhist) {
/*
* The policy must have changed! Shrink the array.
* Can't simply realloc() down, since it might be wrapped.
* To understand the arithmetic below, note that we are
* copying into new positions 0 .. N-1 from old positions
* old_key_next-N .. old_key_next-1, modulo old_key_len,
* where N = pw_history_num - 1 is the length of the
* shortened list. Matt Crawford, FNAL
*/
/*
* M = adb->old_key_len, N = pol->pw_history_num - 1
*
* tmp[0] .. tmp[N-1] = old[(knext-N)%M] .. old[(knext-1)%M]
*/
int j;
osa_pw_hist_t tmp;
tmp = (osa_pw_hist_ent *)
malloc((nhist - 1) * sizeof (osa_pw_hist_ent));
if (tmp == NULL)
return ENOMEM;
for (i = 0; i < nhist - 1; i++) {
/*
* Add nkeys once before taking remainder to avoid
* negative values.
*/
j = (i + nkeys + knext - (nhist - 1)) % nkeys;
tmp[i] = adb->old_keys[j];
}
/* Now free the ones we don't keep (the oldest ones) */
for (i = 0; i < nkeys - (nhist - 1); i++) {
j = (i + nkeys + knext) % nkeys;
histp = &adb->old_keys[j];
for (j = 0; j < histp->n_key_data; j++) {
krb5_free_key_data_contents(context, &histp->key_data[j]);
}
free(histp->key_data);
}
free(adb->old_keys);
adb->old_keys = tmp;
nkeys = adb->old_key_len = nhist - 1;
knext = adb->old_key_next = 0;
}
/*
* If nhist decreased since the last password change, and nkeys+1
* is less than the previous nhist, it is possible for knext to
* index into unallocated space. This condition would not be
* caught by the resizing code above.
*/
if (knext + 1 > nkeys)
knext = adb->old_key_next = 0;
/* free the old pw history entry if it contains data */
histp = &adb->old_keys[knext];
for (i = 0; i < (unsigned int) histp->n_key_data; i++)
krb5_free_key_data_contents(context, &histp->key_data[i]);
free(histp->key_data);
/* store the new entry */
adb->old_keys[knext] = *pw;
/* update the next pointer */
if (++adb->old_key_next == nhist - 1)
adb->old_key_next = 0;
return(0);
}
| DoS | 0 | static kadm5_ret_t add_to_history(krb5_context context,
krb5_kvno hist_kvno,
osa_princ_ent_t adb,
kadm5_policy_ent_t pol,
osa_pw_hist_ent *pw)
{
osa_pw_hist_ent *histp;
uint32_t nhist;
unsigned int i, knext, nkeys;
nhist = pol->pw_history_num;
/* A history of 1 means just check the current password */
if (nhist <= 1)
return 0;
if (adb->admin_history_kvno != hist_kvno) {
/* The history key has changed since the last password change, so we
* have to reset the password history. */
free(adb->old_keys);
adb->old_keys = NULL;
adb->old_key_len = 0;
adb->old_key_next = 0;
adb->admin_history_kvno = hist_kvno;
}
nkeys = adb->old_key_len;
knext = adb->old_key_next;
/* resize the adb->old_keys array if necessary */
if (nkeys + 1 < nhist) {
if (adb->old_keys == NULL) {
adb->old_keys = (osa_pw_hist_ent *)
malloc((nkeys + 1) * sizeof (osa_pw_hist_ent));
} else {
adb->old_keys = (osa_pw_hist_ent *)
realloc(adb->old_keys,
(nkeys + 1) * sizeof (osa_pw_hist_ent));
}
if (adb->old_keys == NULL)
return(ENOMEM);
memset(&adb->old_keys[nkeys], 0, sizeof(osa_pw_hist_ent));
nkeys = ++adb->old_key_len;
/*
* To avoid losing old keys, shift forward each entry after
* knext.
*/
for (i = nkeys - 1; i > knext; i--) {
adb->old_keys[i] = adb->old_keys[i - 1];
}
memset(&adb->old_keys[knext], 0, sizeof(osa_pw_hist_ent));
} else if (nkeys + 1 > nhist) {
/*
* The policy must have changed! Shrink the array.
* Can't simply realloc() down, since it might be wrapped.
* To understand the arithmetic below, note that we are
* copying into new positions 0 .. N-1 from old positions
* old_key_next-N .. old_key_next-1, modulo old_key_len,
* where N = pw_history_num - 1 is the length of the
* shortened list. Matt Crawford, FNAL
*/
/*
* M = adb->old_key_len, N = pol->pw_history_num - 1
*
* tmp[0] .. tmp[N-1] = old[(knext-N)%M] .. old[(knext-1)%M]
*/
int j;
osa_pw_hist_t tmp;
tmp = (osa_pw_hist_ent *)
malloc((nhist - 1) * sizeof (osa_pw_hist_ent));
if (tmp == NULL)
return ENOMEM;
for (i = 0; i < nhist - 1; i++) {
/*
* Add nkeys once before taking remainder to avoid
* negative values.
*/
j = (i + nkeys + knext - (nhist - 1)) % nkeys;
tmp[i] = adb->old_keys[j];
}
/* Now free the ones we don't keep (the oldest ones) */
for (i = 0; i < nkeys - (nhist - 1); i++) {
j = (i + nkeys + knext) % nkeys;
histp = &adb->old_keys[j];
for (j = 0; j < histp->n_key_data; j++) {
krb5_free_key_data_contents(context, &histp->key_data[j]);
}
free(histp->key_data);
}
free(adb->old_keys);
adb->old_keys = tmp;
nkeys = adb->old_key_len = nhist - 1;
knext = adb->old_key_next = 0;
}
/*
* If nhist decreased since the last password change, and nkeys+1
* is less than the previous nhist, it is possible for knext to
* index into unallocated space. This condition would not be
* caught by the resizing code above.
*/
if (knext + 1 > nkeys)
knext = adb->old_key_next = 0;
/* free the old pw history entry if it contains data */
histp = &adb->old_keys[knext];
for (i = 0; i < (unsigned int) histp->n_key_data; i++)
krb5_free_key_data_contents(context, &histp->key_data[i]);
free(histp->key_data);
/* store the new entry */
adb->old_keys[knext] = *pw;
/* update the next pointer */
if (++adb->old_key_next == nhist - 1)
adb->old_key_next = 0;
return(0);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
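Annotation: the shrink branch of add_to_history() above is the delicate part, since the ring may be wrapped and so cannot simply be realloc()'d down. A worked example of its index arithmetic, using illustrative sizes (ring size M = 5, next-slot-to-overwrite knext = 2, shrinking to N = 3 entries):

#include <stdio.h>

int main(void)
{
    /* Illustrative values, not taken from any real database. */
    const unsigned M = 5, knext = 2, N = 3;
    unsigned i;

    /* Kept entries, oldest first: old[(knext - N) % M] .. old[(knext - 1) % M].
     * M is added before subtracting so the unsigned operand never wraps. */
    for (i = 0; i < N; i++)
        printf("tmp[%u] = old[%u]\n", i, (i + M + knext - N) % M);

    /* The M - N oldest entries, starting at knext, get freed. */
    for (i = 0; i < M - N; i++)
        printf("free old[%u]\n", (i + M + knext) % M);
    return 0;
}

With these values the program prints tmp[0] = old[4], tmp[1] = old[0], tmp[2] = old[1], then frees old[2] and old[3]: the three newest entries survive in oldest-to-newest order, and the two oldest (starting at knext) are discarded, matching the comments in the record.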
17,168 | check_pw_reuse(krb5_context context,
krb5_keyblock *hist_keyblocks,
int n_new_key_data, krb5_key_data *new_key_data,
unsigned int n_pw_hist_data, osa_pw_hist_ent *pw_hist_data)
{
unsigned int x, y, z;
krb5_keyblock newkey, histkey, *kb;
krb5_key_data *key_data;
krb5_error_code ret;
assert (n_new_key_data >= 0);
for (x = 0; x < (unsigned) n_new_key_data; x++) {
/* Check only entries with the most recent kvno. */
if (new_key_data[x].key_data_kvno != new_key_data[0].key_data_kvno)
break;
ret = krb5_dbe_decrypt_key_data(context, NULL, &(new_key_data[x]),
&newkey, NULL);
if (ret)
return(ret);
for (y = 0; y < n_pw_hist_data; y++) {
for (z = 0; z < (unsigned int) pw_hist_data[y].n_key_data; z++) {
for (kb = hist_keyblocks; kb->enctype != 0; kb++) {
key_data = &pw_hist_data[y].key_data[z];
ret = krb5_dbe_decrypt_key_data(context, kb, key_data,
&histkey, NULL);
if (ret)
continue;
if (newkey.length == histkey.length &&
newkey.enctype == histkey.enctype &&
memcmp(newkey.contents, histkey.contents,
histkey.length) == 0) {
krb5_free_keyblock_contents(context, &histkey);
krb5_free_keyblock_contents(context, &newkey);
return KADM5_PASS_REUSE;
}
krb5_free_keyblock_contents(context, &histkey);
}
}
}
krb5_free_keyblock_contents(context, &newkey);
}
return(0);
}
| DoS | 0 | check_pw_reuse(krb5_context context,
krb5_keyblock *hist_keyblocks,
int n_new_key_data, krb5_key_data *new_key_data,
unsigned int n_pw_hist_data, osa_pw_hist_ent *pw_hist_data)
{
unsigned int x, y, z;
krb5_keyblock newkey, histkey, *kb;
krb5_key_data *key_data;
krb5_error_code ret;
assert (n_new_key_data >= 0);
for (x = 0; x < (unsigned) n_new_key_data; x++) {
/* Check only entries with the most recent kvno. */
if (new_key_data[x].key_data_kvno != new_key_data[0].key_data_kvno)
break;
ret = krb5_dbe_decrypt_key_data(context, NULL, &(new_key_data[x]),
&newkey, NULL);
if (ret)
return(ret);
for (y = 0; y < n_pw_hist_data; y++) {
for (z = 0; z < (unsigned int) pw_hist_data[y].n_key_data; z++) {
for (kb = hist_keyblocks; kb->enctype != 0; kb++) {
key_data = &pw_hist_data[y].key_data[z];
ret = krb5_dbe_decrypt_key_data(context, kb, key_data,
&histkey, NULL);
if (ret)
continue;
if (newkey.length == histkey.length &&
newkey.enctype == histkey.enctype &&
memcmp(newkey.contents, histkey.contents,
histkey.length) == 0) {
krb5_free_keyblock_contents(context, &histkey);
krb5_free_keyblock_contents(context, &newkey);
return KADM5_PASS_REUSE;
}
krb5_free_keyblock_contents(context, &histkey);
}
}
}
krb5_free_keyblock_contents(context, &newkey);
}
return(0);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
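Annotation: check_pw_reuse() above decrypts each candidate key once, then tries it against every history entry's key data under every history keyblock (the hist_keyblocks array is terminated by an entry whose enctype is 0). The match test is the three-part comparison sketched below with a minimal keyblock type; the names are illustrative:

#include <stdbool.h>
#include <string.h>

/* Minimal keyblock mirroring the fields the reuse check compares. */
struct kb {
    int enctype;
    unsigned int length;
    const unsigned char *contents;
};

/* A match requires identical enctype and length before the byte-wise
 * comparison, exactly as in check_pw_reuse(). */
static bool kb_equal(const struct kb *a, const struct kb *b)
{
    return a->length == b->length &&
           a->enctype == b->enctype &&
           memcmp(a->contents, b->contents, a->length) == 0;
}

int main(void)
{
    static const unsigned char bytes[4] = { 1, 2, 3, 4 };
    struct kb a = { 17, 4, bytes };
    struct kb b = { 17, 4, bytes };

    return !kb_equal(&a, &b);
}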
17,169 | static void cleanup_key_data(context, count, data)
krb5_context context;
int count;
krb5_key_data * data;
{
int i, j;
for (i = 0; i < count; i++)
for (j = 0; j < data[i].key_data_ver; j++)
if (data[i].key_data_length[j])
krb5_db_free(context, data[i].key_data_contents[j]);
krb5_db_free(context, data);
}
| DoS | 0 | static void cleanup_key_data(context, count, data)
krb5_context context;
int count;
krb5_key_data * data;
{
int i, j;
for (i = 0; i < count; i++)
for (j = 0; j < data[i].key_data_ver; j++)
if (data[i].key_data_length[j])
krb5_db_free(context, data[i].key_data_contents[j]);
krb5_db_free(context, data);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,170 | int create_history_entry(krb5_context context,
krb5_keyblock *hist_key, int n_key_data,
krb5_key_data *key_data, osa_pw_hist_ent *hist)
{
int i, ret;
krb5_keyblock key;
krb5_keysalt salt;
hist->key_data = (krb5_key_data*)malloc(n_key_data*sizeof(krb5_key_data));
if (hist->key_data == NULL)
return ENOMEM;
memset(hist->key_data, 0, n_key_data*sizeof(krb5_key_data));
for (i = 0; i < n_key_data; i++) {
ret = krb5_dbe_decrypt_key_data(context, NULL, &key_data[i], &key,
&salt);
if (ret)
return ret;
ret = krb5_dbe_encrypt_key_data(context, hist_key, &key, &salt,
key_data[i].key_data_kvno,
&hist->key_data[i]);
if (ret)
return ret;
krb5_free_keyblock_contents(context, &key);
/* krb5_free_keysalt(context, &salt); */
}
hist->n_key_data = n_key_data;
return 0;
}
| DoS | 0 | int create_history_entry(krb5_context context,
krb5_keyblock *hist_key, int n_key_data,
krb5_key_data *key_data, osa_pw_hist_ent *hist)
{
int i, ret;
krb5_keyblock key;
krb5_keysalt salt;
hist->key_data = (krb5_key_data*)malloc(n_key_data*sizeof(krb5_key_data));
if (hist->key_data == NULL)
return ENOMEM;
memset(hist->key_data, 0, n_key_data*sizeof(krb5_key_data));
for (i = 0; i < n_key_data; i++) {
ret = krb5_dbe_decrypt_key_data(context, NULL, &key_data[i], &key,
&salt);
if (ret)
return ret;
ret = krb5_dbe_encrypt_key_data(context, hist_key, &key, &salt,
key_data[i].key_data_kvno,
&hist->key_data[i]);
if (ret)
return ret;
krb5_free_keyblock_contents(context, &key);
/* krb5_free_keysalt(context, &salt); */
}
hist->n_key_data = n_key_data;
return 0;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
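Annotation: worth noting in create_history_entry() above, the early returns leave hist->key_data and any already-encrypted entries allocated, and hist->n_key_data is only set on success, so a later free_history_entry() would skip those entries. A sketch of the alternative unwind-on-error shape, using hypothetical stand-in types rather than the krb5 ones:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins; not the krb5 types. */
struct entry { char *buf; };

/* Dummy per-entry transform standing in for decrypt-then-re-encrypt;
 * a NULL input forces a mid-loop failure. */
static int transform(const struct entry *in, struct entry *out)
{
    size_t len;

    if (in->buf == NULL)
        return EINVAL;
    len = strlen(in->buf) + 1;
    out->buf = malloc(len);
    if (out->buf == NULL)
        return ENOMEM;
    memcpy(out->buf, in->buf, len);
    return 0;
}

static void entry_free(struct entry *e)
{
    free(e->buf);
    e->buf = NULL;
}

/* A failure mid-loop unwinds everything already built, so no partially
 * populated array ever escapes to the caller. */
static int build_history(const struct entry *src, int n, struct entry **out)
{
    struct entry *arr = calloc((size_t)n, sizeof(*arr));
    int i, ret;

    if (arr == NULL)
        return ENOMEM;
    for (i = 0; i < n; i++) {
        ret = transform(&src[i], &arr[i]);
        if (ret) {
            while (--i >= 0)
                entry_free(&arr[i]);
            free(arr);
            return ret;
        }
    }
    *out = arr;
    return 0;
}

int main(void)
{
    struct entry src[2] = { { "one" }, { NULL } }; /* NULL forces failure */
    struct entry *out = NULL;
    int ret = build_history(src, 2, &out);

    return (ret != 0 && out == NULL) ? 0 : 1;      /* unwound cleanly */
}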
17,171 | void free_history_entry(krb5_context context, osa_pw_hist_ent *hist)
{
int i;
for (i = 0; i < hist->n_key_data; i++)
krb5_free_key_data_contents(context, &hist->key_data[i]);
free(hist->key_data);
}
| DoS | 0 | void free_history_entry(krb5_context context, osa_pw_hist_ent *hist)
{
int i;
for (i = 0; i < hist->n_key_data; i++)
krb5_free_key_data_contents(context, &hist->key_data[i]);
free(hist->key_data);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,172 | kadm5_chpass_principal(void *server_handle,
krb5_principal principal, char *password)
{
return
kadm5_chpass_principal_3(server_handle, principal, FALSE,
0, NULL, password);
}
| DoS | 0 | kadm5_chpass_principal(void *server_handle,
krb5_principal principal, char *password)
{
return
kadm5_chpass_principal_3(server_handle, principal, FALSE,
0, NULL, password);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,173 | kadm5_chpass_principal_3(void *server_handle,
krb5_principal principal, krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
char *password)
{
krb5_int32 now;
kadm5_policy_ent_rec pol;
osa_princ_ent_rec adb;
krb5_db_entry *kdb;
int ret, ret2, last_pwd, hist_added;
int have_pol = 0;
kadm5_server_handle_t handle = server_handle;
osa_pw_hist_ent hist;
krb5_keyblock *act_mkey, *hist_keyblocks = NULL;
krb5_kvno act_kvno, hist_kvno;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
hist_added = 0;
memset(&hist, 0, sizeof(hist));
if (principal == NULL || password == NULL)
return EINVAL;
if ((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE)
return KADM5_PROTECT_PRINCIPAL;
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy, &pol)))
goto done;
have_pol = 1;
/* Create a password history entry before we change kdb's key_data. */
ret = kdb_get_hist_key(handle, &hist_keyblocks, &hist_kvno);
if (ret)
goto done;
ret = create_history_entry(handle->context, &hist_keyblocks[0],
kdb->n_key_data, kdb->key_data, &hist);
if (ret)
goto done;
}
if ((ret = passwd_check(handle, password, have_pol ? &pol : NULL,
principal)))
goto done;
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, &act_kvno,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_cpw(handle->context, act_mkey, ks_tuple, n_ks_tuple,
password, 0 /* increment kvno */,
keepold, kdb);
if (ret)
goto done;
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
/* the policy was loaded before */
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if ((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
ret = check_pw_reuse(handle->context, hist_keyblocks,
kdb->n_key_data, kdb->key_data,
1, &hist);
if (ret)
goto done;
if (pol.pw_history_num > 1) {
/* If hist_kvno has changed since the last password change, we
* can't check the history. */
if (adb.admin_history_kvno == hist_kvno) {
ret = check_pw_reuse(handle->context, hist_keyblocks,
kdb->n_key_data, kdb->key_data,
adb.old_key_len, adb.old_keys);
if (ret)
goto done;
}
ret = add_to_history(handle->context, hist_kvno, &adb, &pol,
&hist);
if (ret)
goto done;
hist_added = 1;
}
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
#ifdef USE_PASSWORD_SERVER
if (kadm5_use_password_server () &&
(krb5_princ_size (handle->context, principal) == 1)) {
krb5_data *princ = krb5_princ_component (handle->context, principal, 0);
const char *path = "/usr/sbin/mkpassdb";
char *argv[] = { "mkpassdb", "-setpassword", NULL, NULL };
char *pstring = NULL;
if (!ret) {
pstring = malloc ((princ->length + 1) * sizeof (char));
if (pstring == NULL) { ret = ENOMEM; }
}
if (!ret) {
memcpy (pstring, princ->data, princ->length);
pstring [princ->length] = '\0';
argv[2] = pstring;
ret = kadm5_launch_task (handle->context, path, argv, password);
}
if (pstring != NULL)
free (pstring);
if (ret)
goto done;
}
#endif
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
/* key data and attributes changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_ATTRIBUTES |
KADM5_FAIL_AUTH_COUNT;
/* | KADM5_CPW_FUNCTION */
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
n_ks_tuple, ks_tuple, password);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, n_ks_tuple, ks_tuple, password);
ret = KADM5_OK;
done:
if (!hist_added && hist.key_data)
free_history_entry(handle->context, &hist);
kdb_free_entry(handle, kdb, &adb);
kdb_free_keyblocks(handle, hist_keyblocks);
if (have_pol && (ret2 = kadm5_free_policy_ent(handle->lhandle, &pol))
&& !ret)
ret = ret2;
return ret;
}
| DoS | 0 | kadm5_chpass_principal_3(void *server_handle,
krb5_principal principal, krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
char *password)
{
krb5_int32 now;
kadm5_policy_ent_rec pol;
osa_princ_ent_rec adb;
krb5_db_entry *kdb;
int ret, ret2, last_pwd, hist_added;
int have_pol = 0;
kadm5_server_handle_t handle = server_handle;
osa_pw_hist_ent hist;
krb5_keyblock *act_mkey, *hist_keyblocks = NULL;
krb5_kvno act_kvno, hist_kvno;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
hist_added = 0;
memset(&hist, 0, sizeof(hist));
if (principal == NULL || password == NULL)
return EINVAL;
if ((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE)
return KADM5_PROTECT_PRINCIPAL;
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy, &pol)))
goto done;
have_pol = 1;
/* Create a password history entry before we change kdb's key_data. */
ret = kdb_get_hist_key(handle, &hist_keyblocks, &hist_kvno);
if (ret)
goto done;
ret = create_history_entry(handle->context, &hist_keyblocks[0],
kdb->n_key_data, kdb->key_data, &hist);
if (ret)
goto done;
}
if ((ret = passwd_check(handle, password, have_pol ? &pol : NULL,
principal)))
goto done;
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, &act_kvno,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_cpw(handle->context, act_mkey, ks_tuple, n_ks_tuple,
password, 0 /* increment kvno */,
keepold, kdb);
if (ret)
goto done;
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
/* the policy was loaded before */
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if ((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
ret = check_pw_reuse(handle->context, hist_keyblocks,
kdb->n_key_data, kdb->key_data,
1, &hist);
if (ret)
goto done;
if (pol.pw_history_num > 1) {
/* If hist_kvno has changed since the last password change, we
* can't check the history. */
if (adb.admin_history_kvno == hist_kvno) {
ret = check_pw_reuse(handle->context, hist_keyblocks,
kdb->n_key_data, kdb->key_data,
adb.old_key_len, adb.old_keys);
if (ret)
goto done;
}
ret = add_to_history(handle->context, hist_kvno, &adb, &pol,
&hist);
if (ret)
goto done;
hist_added = 1;
}
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
#ifdef USE_PASSWORD_SERVER
if (kadm5_use_password_server () &&
(krb5_princ_size (handle->context, principal) == 1)) {
krb5_data *princ = krb5_princ_component (handle->context, principal, 0);
const char *path = "/usr/sbin/mkpassdb";
char *argv[] = { "mkpassdb", "-setpassword", NULL, NULL };
char *pstring = NULL;
if (!ret) {
pstring = malloc ((princ->length + 1) * sizeof (char));
if (pstring == NULL) { ret = ENOMEM; }
}
if (!ret) {
memcpy (pstring, princ->data, princ->length);
pstring [princ->length] = '\0';
argv[2] = pstring;
ret = kadm5_launch_task (handle->context, path, argv, password);
}
if (pstring != NULL)
free (pstring);
if (ret)
goto done;
}
#endif
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
/* key data and attributes changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_ATTRIBUTES |
KADM5_FAIL_AUTH_COUNT;
/* | KADM5_CPW_FUNCTION */
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
n_ks_tuple, ks_tuple, password);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, n_ks_tuple, ks_tuple, password);
ret = KADM5_OK;
done:
if (!hist_added && hist.key_data)
free_history_entry(handle->context, &hist);
kdb_free_entry(handle, kdb, &adb);
kdb_free_keyblocks(handle, hist_keyblocks);
if (have_pol && (ret2 = kadm5_free_policy_ent(handle->lhandle, &pol))
&& !ret)
ret = ret2;
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
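Annotation: in kadm5_chpass_principal_3() above, the hist_added flag records an ownership transfer: once add_to_history() has stored hist into adb.old_keys, the done: block must not free it a second time. A minimal sketch of that flag-guarded handoff, with illustrative types:

#include <stdbool.h>
#include <stdlib.h>

struct buf { void *p; };

static void buf_release(struct buf *b)
{
    free(b->p);
    b->p = NULL;
}

/* Stand-in for add_to_history(): on success the container takes
 * ownership of *b. Fails if the single slot is already occupied. */
static int store(struct buf *container, struct buf *b)
{
    if (container->p != NULL)
        return -1;
    container->p = b->p; /* ownership moves here */
    return 0;
}

static int do_change(struct buf *container)
{
    struct buf hist = { malloc(16) };
    bool hist_added = false;
    int ret;

    if (hist.p == NULL)
        return -1;
    ret = store(container, &hist);
    if (ret)
        goto done;
    hist_added = true; /* the container owns it now */
done:
    if (!hist_added && hist.p)
        buf_release(&hist); /* free only what was never handed off */
    return ret;
}

int main(void)
{
    struct buf owner = { NULL };

    if (do_change(&owner) != 0)
        return 1;
    buf_release(&owner); /* the new owner, not do_change(), frees it */
    return 0;
}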
17,174 | kadm5_copy_principal(krb5_context context, krb5_const_principal inprinc, krb5_principal *outprinc)
{
register krb5_principal tempprinc;
register int i, nelems;
tempprinc = (krb5_principal)krb5_db_alloc(context, NULL, sizeof(krb5_principal_data));
if (tempprinc == 0)
return ENOMEM;
VALGRIND_CHECK_DEFINED(*inprinc);
*tempprinc = *inprinc;
nelems = (int) krb5_princ_size(context, inprinc);
tempprinc->data = krb5_db_alloc(context, NULL, nelems * sizeof(krb5_data));
if (tempprinc->data == 0) {
krb5_db_free(context, (char *)tempprinc);
return ENOMEM;
}
for (i = 0; i < nelems; i++) {
unsigned int len = krb5_princ_component(context, inprinc, i)->length;
krb5_princ_component(context, tempprinc, i)->length = len;
if (((krb5_princ_component(context, tempprinc, i)->data =
krb5_db_alloc(context, NULL, len)) == 0) && len) {
while (--i >= 0)
krb5_db_free(context, krb5_princ_component(context, tempprinc, i)->data);
krb5_db_free (context, tempprinc->data);
krb5_db_free (context, tempprinc);
return ENOMEM;
}
if (len)
memcpy(krb5_princ_component(context, tempprinc, i)->data,
krb5_princ_component(context, inprinc, i)->data, len);
krb5_princ_component(context, tempprinc, i)->magic = KV5M_DATA;
}
tempprinc->realm.data =
krb5_db_alloc(context, NULL, tempprinc->realm.length = inprinc->realm.length);
if (!tempprinc->realm.data && tempprinc->realm.length) {
for (i = 0; i < nelems; i++)
krb5_db_free(context, krb5_princ_component(context, tempprinc, i)->data);
krb5_db_free(context, tempprinc->data);
krb5_db_free(context, tempprinc);
return ENOMEM;
}
if (tempprinc->realm.length)
memcpy(tempprinc->realm.data, inprinc->realm.data,
inprinc->realm.length);
*outprinc = tempprinc;
return 0;
}
| DoS | 0 | kadm5_copy_principal(krb5_context context, krb5_const_principal inprinc, krb5_principal *outprinc)
{
register krb5_principal tempprinc;
register int i, nelems;
tempprinc = (krb5_principal)krb5_db_alloc(context, NULL, sizeof(krb5_principal_data));
if (tempprinc == 0)
return ENOMEM;
VALGRIND_CHECK_DEFINED(*inprinc);
*tempprinc = *inprinc;
nelems = (int) krb5_princ_size(context, inprinc);
tempprinc->data = krb5_db_alloc(context, NULL, nelems * sizeof(krb5_data));
if (tempprinc->data == 0) {
krb5_db_free(context, (char *)tempprinc);
return ENOMEM;
}
for (i = 0; i < nelems; i++) {
unsigned int len = krb5_princ_component(context, inprinc, i)->length;
krb5_princ_component(context, tempprinc, i)->length = len;
if (((krb5_princ_component(context, tempprinc, i)->data =
krb5_db_alloc(context, NULL, len)) == 0) && len) {
while (--i >= 0)
krb5_db_free(context, krb5_princ_component(context, tempprinc, i)->data);
krb5_db_free (context, tempprinc->data);
krb5_db_free (context, tempprinc);
return ENOMEM;
}
if (len)
memcpy(krb5_princ_component(context, tempprinc, i)->data,
krb5_princ_component(context, inprinc, i)->data, len);
krb5_princ_component(context, tempprinc, i)->magic = KV5M_DATA;
}
tempprinc->realm.data =
krb5_db_alloc(context, NULL, tempprinc->realm.length = inprinc->realm.length);
if (!tempprinc->realm.data && tempprinc->realm.length) {
for (i = 0; i < nelems; i++)
krb5_db_free(context, krb5_princ_component(context, tempprinc, i)->data);
krb5_db_free(context, tempprinc->data);
krb5_db_free(context, tempprinc);
return ENOMEM;
}
if (tempprinc->realm.length)
memcpy(tempprinc->realm.data, inprinc->realm.data,
inprinc->realm.length);
*outprinc = tempprinc;
return 0;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
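Annotation: the "== 0) && len" test in kadm5_copy_principal() above exists because an allocator may legitimately return NULL for a zero-byte request, so only a NULL result for a nonzero length counts as ENOMEM; on failure the function unwinds every component copied so far. The guard in isolation, as plain C:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Copy a length-prefixed component; malloc(0) returning NULL is legal,
 * so NULL is an error only when len != 0. */
static int copy_component(const unsigned char *src, size_t len,
                          unsigned char **dst)
{
    *dst = malloc(len);
    if (*dst == NULL && len != 0)
        return ENOMEM;
    if (len != 0)
        memcpy(*dst, src, len);
    return 0;
}

int main(void)
{
    unsigned char *d = NULL;
    int ret = copy_component((const unsigned char *)"xy", 2, &d);

    free(d); /* free(NULL) is a no-op, so this is safe either way */
    return ret;
}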
17,175 | kadm5_create_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask,
char *password)
{
return
kadm5_create_principal_3(server_handle, entry, mask,
0, NULL, password);
}
| DoS | 0 | kadm5_create_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask,
char *password)
{
return
kadm5_create_principal_3(server_handle, entry, mask,
0, NULL, password);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,176 | kadm5_create_principal_3(void *server_handle,
kadm5_principal_ent_t entry, long mask,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
char *password)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_policy_ent_rec polent;
krb5_boolean have_polent = FALSE;
krb5_int32 now;
krb5_tl_data *tl_data_orig, *tl_data_tail;
unsigned int ret;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
krb5_kvno act_kvno;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
check_1_6_dummy(entry, mask, n_ks_tuple, ks_tuple, &password);
/*
* Argument sanity checking, and opening up the DB
*/
if(!(mask & KADM5_PRINCIPAL) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED) || (mask & KADM5_FAIL_AUTH_COUNT))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if (entry == NULL)
return EINVAL;
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
/*
* Check to see if the principal exists
*/
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
switch(ret) {
case KADM5_UNK_PRINC:
break;
case 0:
kdb_free_entry(handle, kdb, &adb);
return KADM5_DUP;
default:
return ret;
}
kdb = krb5_db_alloc(handle->context, NULL, sizeof(*kdb));
if (kdb == NULL)
return ENOMEM;
memset(kdb, 0, sizeof(*kdb));
memset(&adb, 0, sizeof(osa_princ_ent_rec));
/*
* If a policy was specified, load it.
* If we can not find the one specified return an error
*/
if ((mask & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, entry->policy,
&polent)) != KADM5_OK) {
if (ret == EINVAL)
ret = KADM5_BAD_POLICY;
if (ret)
goto cleanup;
}
have_polent = TRUE;
}
if (password) {
ret = passwd_check(handle, password, have_polent ? &polent : NULL,
entry->principal);
if (ret)
goto cleanup;
}
/*
* Start populating the various DB fields, using the
* "defaults" for fields that were not specified by the
* mask.
*/
if ((ret = krb5_timeofday(handle->context, &now)))
goto cleanup;
kdb->magic = KRB5_KDB_MAGIC_NUMBER;
kdb->len = KRB5_KDB_V1_BASE_LENGTH; /* gag me with a chainsaw */
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
else
kdb->attributes = handle->params.flags;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
else
kdb->max_life = handle->params.max_life;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
else
kdb->max_renewable_life = handle->params.max_rlife;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
else
kdb->expiration = handle->params.expiration;
kdb->pw_expiration = 0;
if (have_polent) {
if(polent.pw_max_life)
kdb->pw_expiration = now + polent.pw_max_life;
else
kdb->pw_expiration = 0;
}
if ((mask & KADM5_PW_EXPIRATION))
kdb->pw_expiration = entry->pw_expiration;
kdb->last_success = 0;
kdb->last_failed = 0;
kdb->fail_auth_count = 0;
/* this is kind of gross, but in order to free the tl data, I need
to free the entire kdb entry, and that will try to free the
principal. */
if ((ret = kadm5_copy_principal(handle->context,
entry->principal, &(kdb->princ))))
goto cleanup;
if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now)))
goto cleanup;
if (mask & KADM5_TL_DATA) {
/* splice entry->tl_data onto the front of kdb->tl_data */
tl_data_orig = kdb->tl_data;
for (tl_data_tail = entry->tl_data; tl_data_tail;
tl_data_tail = tl_data_tail->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl_data_tail);
if( ret )
goto cleanup;
}
}
/* initialize the keys */
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, &act_kvno,
&act_mkey);
if (ret)
goto cleanup;
if (password) {
ret = krb5_dbe_cpw(handle->context, act_mkey, ks_tuple, n_ks_tuple,
password, (mask & KADM5_KVNO)?entry->kvno:1,
FALSE, kdb);
} else {
/* Null password means create with random key (new in 1.8). */
ret = krb5_dbe_crk(handle->context, &master_keyblock,
ks_tuple, n_ks_tuple, FALSE, kdb);
}
if (ret)
goto cleanup;
/* Record the master key VNO used to encrypt this entry's keys */
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto cleanup;
ret = k5_kadm5_hook_create(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask,
n_ks_tuple, ks_tuple, password);
if (ret)
goto cleanup;
/* populate the admin-server-specific fields. In the OV server,
this used to be in a separate database. Since there's already
marshalling code for the admin fields, to keep things simple,
I'm going to keep it, and make all the admin stuff occupy a
single tl_data record. */
adb.admin_history_kvno = INITIAL_HIST_KVNO;
if (have_polent) {
adb.aux_attributes = KADM5_POLICY;
/* this does *not* need to be strdup'ed, because adb is xdr */
/* encoded in osa_adb_create_princ, and not ever freed */
adb.policy = entry->policy;
}
/* increment the policy ref count, if any */
if (have_polent) {
polent.policy_refcnt++;
if ((ret = kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT))
!= KADM5_OK)
goto cleanup;
}
/* In all cases key and the principal data is set, let the database provider know */
kdb->mask = mask | KADM5_KEY_DATA | KADM5_PRINCIPAL ;
/* store the new db entry */
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) {
if (have_polent) {
/* decrement the policy ref count */
polent.policy_refcnt--;
/*
* if this fails, there's nothing we can do anyway. The
* policy refcount will be too high.
*/
(void) kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT);
}
}
(void) k5_kadm5_hook_create(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask,
n_ks_tuple, ks_tuple, password);
cleanup:
krb5_db_free_principal(handle->context, kdb);
if (have_polent)
(void) kadm5_free_policy_ent(handle->lhandle, &polent);
return ret;
}
| DoS | 0 | kadm5_create_principal_3(void *server_handle,
kadm5_principal_ent_t entry, long mask,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
char *password)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_policy_ent_rec polent;
krb5_boolean have_polent = FALSE;
krb5_int32 now;
krb5_tl_data *tl_data_orig, *tl_data_tail;
unsigned int ret;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
krb5_kvno act_kvno;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
check_1_6_dummy(entry, mask, n_ks_tuple, ks_tuple, &password);
/*
* Argument sanity checking, and opening up the DB
*/
if(!(mask & KADM5_PRINCIPAL) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED) || (mask & KADM5_FAIL_AUTH_COUNT))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if (entry == NULL)
return EINVAL;
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
/*
* Check to see if the principal exists
*/
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
switch(ret) {
case KADM5_UNK_PRINC:
break;
case 0:
kdb_free_entry(handle, kdb, &adb);
return KADM5_DUP;
default:
return ret;
}
kdb = krb5_db_alloc(handle->context, NULL, sizeof(*kdb));
if (kdb == NULL)
return ENOMEM;
memset(kdb, 0, sizeof(*kdb));
memset(&adb, 0, sizeof(osa_princ_ent_rec));
/*
* If a policy was specified, load it.
* If we can not find the one specified return an error
*/
if ((mask & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, entry->policy,
&polent)) != KADM5_OK) {
if (ret == EINVAL)
ret = KADM5_BAD_POLICY;
if (ret)
goto cleanup;
}
have_polent = TRUE;
}
if (password) {
ret = passwd_check(handle, password, have_polent ? &polent : NULL,
entry->principal);
if (ret)
goto cleanup;
}
/*
* Start populating the various DB fields, using the
* "defaults" for fields that were not specified by the
* mask.
*/
if ((ret = krb5_timeofday(handle->context, &now)))
goto cleanup;
kdb->magic = KRB5_KDB_MAGIC_NUMBER;
kdb->len = KRB5_KDB_V1_BASE_LENGTH; /* gag me with a chainsaw */
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
else
kdb->attributes = handle->params.flags;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
else
kdb->max_life = handle->params.max_life;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
else
kdb->max_renewable_life = handle->params.max_rlife;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
else
kdb->expiration = handle->params.expiration;
kdb->pw_expiration = 0;
if (have_polent) {
if(polent.pw_max_life)
kdb->pw_expiration = now + polent.pw_max_life;
else
kdb->pw_expiration = 0;
}
if ((mask & KADM5_PW_EXPIRATION))
kdb->pw_expiration = entry->pw_expiration;
kdb->last_success = 0;
kdb->last_failed = 0;
kdb->fail_auth_count = 0;
/* this is kind of gross, but in order to free the tl data, I need
to free the entire kdb entry, and that will try to free the
principal. */
if ((ret = kadm5_copy_principal(handle->context,
entry->principal, &(kdb->princ))))
goto cleanup;
if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now)))
goto cleanup;
if (mask & KADM5_TL_DATA) {
/* splice entry->tl_data onto the front of kdb->tl_data */
tl_data_orig = kdb->tl_data;
for (tl_data_tail = entry->tl_data; tl_data_tail;
tl_data_tail = tl_data_tail->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl_data_tail);
if( ret )
goto cleanup;
}
}
/* initialize the keys */
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, &act_kvno,
&act_mkey);
if (ret)
goto cleanup;
if (password) {
ret = krb5_dbe_cpw(handle->context, act_mkey, ks_tuple, n_ks_tuple,
password, (mask & KADM5_KVNO)?entry->kvno:1,
FALSE, kdb);
} else {
/* Null password means create with random key (new in 1.8). */
ret = krb5_dbe_crk(handle->context, &master_keyblock,
ks_tuple, n_ks_tuple, FALSE, kdb);
}
if (ret)
goto cleanup;
/* Record the master key VNO used to encrypt this entry's keys */
ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno);
if (ret)
goto cleanup;
ret = k5_kadm5_hook_create(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask,
n_ks_tuple, ks_tuple, password);
if (ret)
goto cleanup;
/* populate the admin-server-specific fields. In the OV server,
this used to be in a separate database. Since there's already
marshalling code for the admin fields, to keep things simple,
I'm going to keep it, and make all the admin stuff occupy a
single tl_data record. */
adb.admin_history_kvno = INITIAL_HIST_KVNO;
if (have_polent) {
adb.aux_attributes = KADM5_POLICY;
/* this does *not* need to be strdup'ed, because adb is xdr */
/* encoded in osa_adb_create_princ, and not ever freed */
adb.policy = entry->policy;
}
/* increment the policy ref count, if any */
if (have_polent) {
polent.policy_refcnt++;
if ((ret = kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT))
!= KADM5_OK)
goto cleanup;
}
/* In all cases key and the principal data is set, let the database provider know */
kdb->mask = mask | KADM5_KEY_DATA | KADM5_PRINCIPAL ;
/* store the new db entry */
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) {
if (have_polent) {
/* decrement the policy ref count */
polent.policy_refcnt--;
/*
* if this fails, there's nothing we can do anyway. The
* policy refcount will be too high.
*/
(void) kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT);
}
}
(void) k5_kadm5_hook_create(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask,
n_ks_tuple, ks_tuple, password);
cleanup:
krb5_db_free_principal(handle->context, kdb);
if (have_polent)
(void) kadm5_free_policy_ent(handle->lhandle, &polent);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
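Annotation: the mask validation at the top of kadm5_create_principal_3() enforces three rules: the principal bit must be present, server-maintained (read-only) fields must not be set, and bits outside the known mask or mutually exclusive pairs like POLICY/POLICY_CLR are rejected. A standalone sketch of that shape; the bit values are illustrative, not the real KADM5_* constants:

#include <stdbool.h>

/* Illustrative mask bits; the real values live in kadm5/admin.h. */
#define F_PRINCIPAL  0x01u
#define F_ATTRIBUTES 0x02u
#define F_POLICY     0x04u
#define F_POLICY_CLR 0x08u
#define F_MOD_TIME   0x10u /* server-maintained, read-only */
#define F_KEY_DATA   0x20u /* not settable through this interface */

#define READ_ONLY   (F_MOD_TIME | F_KEY_DATA)
#define KNOWN_BITS  (F_PRINCIPAL | F_ATTRIBUTES | F_POLICY | \
                     F_POLICY_CLR | READ_ONLY)

static bool mask_ok(unsigned long mask)
{
    if (!(mask & F_PRINCIPAL))                      /* required bit */
        return false;
    if (mask & READ_ONLY)                           /* server-owned fields */
        return false;
    if ((mask & F_POLICY) && (mask & F_POLICY_CLR)) /* mutually exclusive */
        return false;
    if (mask & ~(unsigned long)KNOWN_BITS)          /* unknown bits */
        return false;
    return true;
}

int main(void)
{
    return !(mask_ok(F_PRINCIPAL | F_ATTRIBUTES) &&
             !mask_ok(F_PRINCIPAL | F_MOD_TIME) &&
             !mask_ok(F_PRINCIPAL | F_POLICY | F_POLICY_CLR));
}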
17,177 | kadm5_ret_t kadm5_decrypt_key(void *server_handle,
kadm5_principal_ent_t entry, krb5_int32
ktype, krb5_int32 stype, krb5_int32
kvno, krb5_keyblock *keyblock,
krb5_keysalt *keysalt, int *kvnop)
{
kadm5_server_handle_t handle = server_handle;
krb5_db_entry dbent;
krb5_key_data *key_data;
krb5_keyblock *mkey_ptr;
int ret;
CHECK_HANDLE(server_handle);
if (entry->n_key_data == 0 || entry->key_data == NULL)
return EINVAL;
/* find_enctype only uses these two fields */
dbent.n_key_data = entry->n_key_data;
dbent.key_data = entry->key_data;
if ((ret = krb5_dbe_find_enctype(handle->context, &dbent, ktype,
stype, kvno, &key_data)))
return ret;
/* find_mkey only uses this field */
dbent.tl_data = entry->tl_data;
if ((ret = krb5_dbe_find_mkey(handle->context, &dbent, &mkey_ptr))) {
/* try refreshing master key list */
/* XXX it would be nice if we had the mkvno here for optimization */
if (krb5_db_fetch_mkey_list(handle->context, master_princ,
&master_keyblock) == 0) {
if ((ret = krb5_dbe_find_mkey(handle->context, &dbent,
&mkey_ptr))) {
return ret;
}
} else {
return ret;
}
}
if ((ret = krb5_dbe_decrypt_key_data(handle->context, NULL, key_data,
keyblock, keysalt)))
return ret;
/*
* Coerce the enctype of the output keyblock in case we got an
* inexact match on the enctype; this behavior will go away when
* the key storage architecture gets redesigned for 1.3.
*/
if (ktype != -1)
keyblock->enctype = ktype;
if (kvnop)
*kvnop = key_data->key_data_kvno;
return KADM5_OK;
}
| DoS | 0 | kadm5_ret_t kadm5_decrypt_key(void *server_handle,
kadm5_principal_ent_t entry, krb5_int32
ktype, krb5_int32 stype, krb5_int32
kvno, krb5_keyblock *keyblock,
krb5_keysalt *keysalt, int *kvnop)
{
kadm5_server_handle_t handle = server_handle;
krb5_db_entry dbent;
krb5_key_data *key_data;
krb5_keyblock *mkey_ptr;
int ret;
CHECK_HANDLE(server_handle);
if (entry->n_key_data == 0 || entry->key_data == NULL)
return EINVAL;
/* find_enctype only uses these two fields */
dbent.n_key_data = entry->n_key_data;
dbent.key_data = entry->key_data;
if ((ret = krb5_dbe_find_enctype(handle->context, &dbent, ktype,
stype, kvno, &key_data)))
return ret;
/* find_mkey only uses this field */
dbent.tl_data = entry->tl_data;
if ((ret = krb5_dbe_find_mkey(handle->context, &dbent, &mkey_ptr))) {
/* try refreshing master key list */
/* XXX it would be nice if we had the mkvno here for optimization */
if (krb5_db_fetch_mkey_list(handle->context, master_princ,
&master_keyblock) == 0) {
if ((ret = krb5_dbe_find_mkey(handle->context, &dbent,
&mkey_ptr))) {
return ret;
}
} else {
return ret;
}
}
if ((ret = krb5_dbe_decrypt_key_data(handle->context, NULL, key_data,
keyblock, keysalt)))
return ret;
/*
* Coerce the enctype of the output keyblock in case we got an
* inexact match on the enctype; this behavior will go away when
* the key storage architecture gets redesigned for 1.3.
*/
if (ktype != -1)
keyblock->enctype = ktype;
if (kvnop)
*kvnop = key_data->key_data_kvno;
return KADM5_OK;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
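Annotation: kadm5_decrypt_key() above retries the master-key lookup once after refreshing the cached master key list, and if the refresh itself fails it reports the original lookup error rather than the refresh error. The retry-once shape, over a hypothetical cache:

#include <stddef.h>

/* Hypothetical cache; in kadm5 the roles are played by the master key
 * list with krb5_dbe_find_mkey() and krb5_db_fetch_mkey_list(). */
struct cache {
    int populated;
    int id;
    void *val;
};

static int cache_lookup(struct cache *c, int id, void **out)
{
    if (!c->populated || c->id != id)
        return -1;
    *out = c->val;
    return 0;
}

static int cache_refresh(struct cache *c)
{
    c->populated = 1; /* pretend the backing store was re-read */
    return 0;
}

/* On a miss, refresh once and retry; a failed refresh keeps the
 * original lookup error. */
static int lookup_with_refresh(struct cache *c, int id, void **out)
{
    int ret = cache_lookup(c, id, out);

    if (ret == 0)
        return 0;
    if (cache_refresh(c) != 0)
        return ret;
    return cache_lookup(c, id, out);
}

int main(void)
{
    static char value = 'v';
    struct cache c = { 0, 7, &value }; /* not yet populated */
    void *out = NULL;

    return lookup_with_refresh(&c, 7, &out); /* miss, refresh, then hit */
}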
17,178 | kadm5_delete_principal(void *server_handle, krb5_principal principal)
{
unsigned int ret;
kadm5_policy_ent_rec polent;
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = k5_kadm5_hook_remove(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal);
if (ret) {
kdb_free_entry(handle, kdb, &adb);
return ret;
}
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle,
adb.policy, &polent))
== KADM5_OK) {
polent.policy_refcnt--;
if ((ret = kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT))
!= KADM5_OK) {
(void) kadm5_free_policy_ent(handle->lhandle, &polent);
kdb_free_entry(handle, kdb, &adb);
return(ret);
}
}
if ((ret = kadm5_free_policy_ent(handle->lhandle, &polent))) {
kdb_free_entry(handle, kdb, &adb);
return ret;
}
}
ret = kdb_delete_entry(handle, principal);
kdb_free_entry(handle, kdb, &adb);
if (ret == 0)
(void) k5_kadm5_hook_remove(handle->context,
handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal);
return ret;
}
| DoS | 0 | kadm5_delete_principal(void *server_handle, krb5_principal principal)
{
unsigned int ret;
kadm5_policy_ent_rec polent;
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = k5_kadm5_hook_remove(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal);
if (ret) {
kdb_free_entry(handle, kdb, &adb);
return ret;
}
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle,
adb.policy, &polent))
== KADM5_OK) {
polent.policy_refcnt--;
if ((ret = kadm5_modify_policy_internal(handle->lhandle, &polent,
KADM5_REF_COUNT))
!= KADM5_OK) {
(void) kadm5_free_policy_ent(handle->lhandle, &polent);
kdb_free_entry(handle, kdb, &adb);
return(ret);
}
}
if ((ret = kadm5_free_policy_ent(handle->lhandle, &polent))) {
kdb_free_entry(handle, kdb, &adb);
return ret;
}
}
ret = kdb_delete_entry(handle, principal);
kdb_free_entry(handle, kdb, &adb);
if (ret == 0)
(void) k5_kadm5_hook_remove(handle->context,
handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
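Annotation: kadm5_delete_principal() above detaches the entry from its policy first (decrementing policy_refcnt through kadm5_modify_policy_internal()) and only then removes the entry, so the count never outlives the reference holder. A toy sketch of that detach-then-delete order:

#include <assert.h>
#include <stddef.h>

/* Toy reference count standing in for policy_refcnt. */
struct policy { int refcnt; };

struct principal { struct policy *pol; };

static int principal_delete(struct principal *pr)
{
    if (pr->pol != NULL) {
        pr->pol->refcnt--; /* detach from the policy first... */
        pr->pol = NULL;
    }
    /* ...then the principal record itself would be removed. */
    return 0;
}

int main(void)
{
    struct policy p = { 2 };
    struct principal a = { &p };

    principal_delete(&a);
    assert(p.refcnt == 1);
    return 0;
}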
17,179 | kadm5_free_principal(krb5_context context, krb5_principal val)
{
register krb5_int32 i;
if (!val)
return;
if (val->data) {
i = krb5_princ_size(context, val);
while(--i >= 0)
krb5_db_free(context, krb5_princ_component(context, val, i)->data);
krb5_db_free(context, val->data);
}
if (val->realm.data)
krb5_db_free(context, val->realm.data);
krb5_db_free(context, val);
}
| DoS | 0 | kadm5_free_principal(krb5_context context, krb5_principal val)
{
register krb5_int32 i;
if (!val)
return;
if (val->data) {
i = krb5_princ_size(context, val);
while(--i >= 0)
krb5_db_free(context, krb5_princ_component(context, val, i)->data);
krb5_db_free(context, val->data);
}
if (val->realm.data)
krb5_db_free(context, val->realm.data);
krb5_db_free(context, val);
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,180 | kadm5_get_principal_keys(void *server_handle /* IN */,
krb5_principal principal /* IN */,
krb5_keyblock **keyblocks /* OUT */,
int *n_keys /* OUT */)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_ret_t ret;
kadm5_server_handle_t handle = server_handle;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| DoS | 0 | kadm5_get_principal_keys(void *server_handle /* IN */,
krb5_principal principal /* IN */,
krb5_keyblock **keyblocks /* OUT */,
int *n_keys /* OUT */)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
kadm5_ret_t ret;
kadm5_server_handle_t handle = server_handle;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
if (principal == NULL)
return EINVAL;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,181 | kadm5_launch_task (krb5_context context,
const char *task_path, char * const task_argv[],
const char *buffer)
{
kadm5_ret_t ret;
int data_pipe[2];
ret = pipe (data_pipe);
if (ret)
ret = errno;
if (!ret) {
pid_t pid = fork ();
if (pid == -1) {
ret = errno;
close (data_pipe[0]);
close (data_pipe[1]);
} else if (pid == 0) {
/* The child: */
if (dup2 (data_pipe[0], STDIN_FILENO) == -1)
_exit (1);
close (data_pipe[0]);
close (data_pipe[1]);
execv (task_path, task_argv);
_exit (1); /* Fail if execv fails */
} else {
/* The parent: */
int status;
ret = 0;
close (data_pipe[0]);
/* Write out the buffer to the child, add \n */
if (buffer) {
if (krb5_net_write (context, data_pipe[1], buffer, strlen (buffer)) < 0
|| krb5_net_write (context, data_pipe[1], "\n", 1) < 0)
{
/* kill the child to make sure waitpid() won't hang later */
ret = errno;
kill (pid, SIGKILL);
}
}
close (data_pipe[1]);
waitpid (pid, &status, 0);
if (!ret) {
if (WIFEXITED (status)) {
/* child read password and exited. Check the return value. */
if ((WEXITSTATUS (status) != 0) && (WEXITSTATUS (status) != 252)) {
ret = KRB5KDC_ERR_POLICY; /* password change rejected */
}
} else {
/* child read password but crashed or was killed */
ret = KRB5KRB_ERR_GENERIC; /* FIXME: better error */
}
}
}
}
return ret;
}
| DoS | 0 | kadm5_launch_task (krb5_context context,
const char *task_path, char * const task_argv[],
const char *buffer)
{
kadm5_ret_t ret;
int data_pipe[2];
ret = pipe (data_pipe);
if (ret)
ret = errno;
if (!ret) {
pid_t pid = fork ();
if (pid == -1) {
ret = errno;
close (data_pipe[0]);
close (data_pipe[1]);
} else if (pid == 0) {
/* The child: */
if (dup2 (data_pipe[0], STDIN_FILENO) == -1)
_exit (1);
close (data_pipe[0]);
close (data_pipe[1]);
execv (task_path, task_argv);
_exit (1); /* Fail if execv fails */
} else {
/* The parent: */
int status;
ret = 0;
close (data_pipe[0]);
/* Write out the buffer to the child, add \n */
if (buffer) {
if (krb5_net_write (context, data_pipe[1], buffer, strlen (buffer)) < 0
|| krb5_net_write (context, data_pipe[1], "\n", 1) < 0)
{
/* kill the child to make sure waitpid() won't hang later */
ret = errno;
kill (pid, SIGKILL);
}
}
close (data_pipe[1]);
waitpid (pid, &status, 0);
if (!ret) {
if (WIFEXITED (status)) {
/* child read password and exited. Check the return value. */
if ((WEXITSTATUS (status) != 0) && (WEXITSTATUS (status) != 252)) {
ret = KRB5KDC_ERR_POLICY; /* password change rejected */
}
} else {
/* child read password but crashed or was killed */
ret = KRB5KRB_ERR_GENERIC; /* FIXME: better error */
}
}
}
}
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
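Annotation: kadm5_launch_task() above hands the password to the helper over a pipe wired to the child's stdin (with a trailing newline) instead of placing it in argv, where it would be visible in the process table; an exit status of 0 or 252 is accepted, anything else maps to a policy error. A minimal POSIX sketch of the same protocol, with /bin/cat standing in for the real helper and an illustrative secret:

#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    pid_t pid;
    int status;
    const char *secret = "hunter2"; /* illustrative only */

    if (pipe(fds) != 0)
        return 1;
    pid = fork();
    if (pid < 0)
        return 1;
    if (pid == 0) { /* child: stdin becomes the read end, then exec */
        if (dup2(fds[0], STDIN_FILENO) == -1)
            _exit(1);
        close(fds[0]);
        close(fds[1]);
        execl("/bin/cat", "cat", (char *)NULL);
        _exit(1); /* reached only if exec fails */
    }
    /* parent: write the secret plus newline, then close to signal EOF */
    close(fds[0]);
    (void)write(fds[1], secret, strlen(secret));
    (void)write(fds[1], "\n", 1);
    close(fds[1]);
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}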
17,182 | kadm5_modify_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask)
{
int ret, ret2, i;
kadm5_policy_ent_rec npol, opol;
int have_npol = 0, have_opol = 0;
krb5_db_entry *kdb;
krb5_tl_data *tl_data_orig;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if((mask & KADM5_PRINCIPAL) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if(entry == (kadm5_principal_ent_t) NULL)
return EINVAL;
if (mask & KADM5_TL_DATA) {
tl_data_orig = entry->tl_data;
while (tl_data_orig) {
if (tl_data_orig->tl_data_type < 256)
return KADM5_BAD_TL_TYPE;
tl_data_orig = tl_data_orig->tl_data_next;
}
}
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
if (ret)
return(ret);
/*
* This is pretty much the same as create ...
*/
if ((mask & KADM5_POLICY)) {
/* get the new policy */
ret = kadm5_get_policy(handle->lhandle, entry->policy, &npol);
if (ret) {
switch (ret) {
case EINVAL:
ret = KADM5_BAD_POLICY;
break;
case KADM5_UNK_POLICY:
case KADM5_BAD_POLICY:
ret = KADM5_UNK_POLICY;
break;
}
goto done;
}
have_npol = 1;
/* if we already have a policy, get it to decrement the refcnt */
if(adb.aux_attributes & KADM5_POLICY) {
/* ... but not if the old and new are the same */
if(strcmp(adb.policy, entry->policy)) {
ret = kadm5_get_policy(handle->lhandle,
adb.policy, &opol);
switch(ret) {
case EINVAL:
case KADM5_BAD_POLICY:
case KADM5_UNK_POLICY:
break;
case KADM5_OK:
have_opol = 1;
opol.policy_refcnt--;
break;
default:
goto done;
break;
}
npol.policy_refcnt++;
}
} else npol.policy_refcnt++;
/* set us up to use the new policy */
adb.aux_attributes |= KADM5_POLICY;
if (adb.policy)
free(adb.policy);
adb.policy = strdup(entry->policy);
/* set pw_max_life based on new policy */
if (npol.pw_max_life) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb,
&(kdb->pw_expiration));
if (ret)
goto done;
kdb->pw_expiration += npol.pw_max_life;
} else {
kdb->pw_expiration = 0;
}
}
if ((mask & KADM5_POLICY_CLR) &&
(adb.aux_attributes & KADM5_POLICY)) {
ret = kadm5_get_policy(handle->lhandle, adb.policy, &opol);
switch(ret) {
case EINVAL:
case KADM5_BAD_POLICY:
case KADM5_UNK_POLICY:
ret = KADM5_BAD_DB;
goto done;
break;
case KADM5_OK:
have_opol = 1;
if (adb.policy)
free(adb.policy);
adb.policy = NULL;
adb.aux_attributes &= ~KADM5_POLICY;
kdb->pw_expiration = 0;
opol.policy_refcnt--;
break;
default:
goto done;
break;
}
}
if (((mask & KADM5_POLICY) || (mask & KADM5_POLICY_CLR)) &&
(((have_opol) &&
(ret =
kadm5_modify_policy_internal(handle->lhandle, &opol,
KADM5_REF_COUNT))) ||
((have_npol) &&
(ret =
kadm5_modify_policy_internal(handle->lhandle, &npol,
KADM5_REF_COUNT)))))
goto done;
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
if (mask & KADM5_PW_EXPIRATION)
kdb->pw_expiration = entry->pw_expiration;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
if((mask & KADM5_KVNO)) {
for (i = 0; i < kdb->n_key_data; i++)
kdb->key_data[i].key_data_kvno = entry->kvno;
}
if (mask & KADM5_TL_DATA) {
krb5_tl_data *tl;
/* May have to change the version number of the API. Updates the list with the given tl_data rather than overwriting. */
for (tl = entry->tl_data; tl;
tl = tl->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl);
if( ret )
{
goto done;
}
}
}
/*
* Setting entry->fail_auth_count to 0 can be used to manually unlock
* an account. It is not possible to set fail_auth_count to any other
* value using kadmin.
*/
if (mask & KADM5_FAIL_AUTH_COUNT) {
if (entry->fail_auth_count != 0) {
ret = KADM5_BAD_SERVER_PARAMS;
goto done;
}
kdb->fail_auth_count = 0;
}
/* let the mask propagate to the database provider */
kdb->mask = mask;
ret = k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask);
if (ret)
goto done;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) goto done;
(void) k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask);
ret = KADM5_OK;
done:
if (have_opol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &opol);
ret = ret ? ret : ret2;
}
if (have_npol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &npol);
ret = ret ? ret : ret2;
}
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| DoS | 0 | kadm5_modify_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask)
{
int ret, ret2, i;
kadm5_policy_ent_rec npol, opol;
int have_npol = 0, have_opol = 0;
krb5_db_entry *kdb;
krb5_tl_data *tl_data_orig;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if((mask & KADM5_PRINCIPAL) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if(entry == (kadm5_principal_ent_t) NULL)
return EINVAL;
if (mask & KADM5_TL_DATA) {
tl_data_orig = entry->tl_data;
while (tl_data_orig) {
if (tl_data_orig->tl_data_type < 256)
return KADM5_BAD_TL_TYPE;
tl_data_orig = tl_data_orig->tl_data_next;
}
}
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
if (ret)
return(ret);
/*
* This is pretty much the same as create ...
*/
if ((mask & KADM5_POLICY)) {
/* get the new policy */
ret = kadm5_get_policy(handle->lhandle, entry->policy, &npol);
if (ret) {
switch (ret) {
case EINVAL:
ret = KADM5_BAD_POLICY;
break;
case KADM5_UNK_POLICY:
case KADM5_BAD_POLICY:
ret = KADM5_UNK_POLICY;
break;
}
goto done;
}
have_npol = 1;
/* if we already have a policy, get it to decrement the refcnt */
if(adb.aux_attributes & KADM5_POLICY) {
/* ... but not if the old and new are the same */
if(strcmp(adb.policy, entry->policy)) {
ret = kadm5_get_policy(handle->lhandle,
adb.policy, &opol);
switch(ret) {
case EINVAL:
case KADM5_BAD_POLICY:
case KADM5_UNK_POLICY:
break;
case KADM5_OK:
have_opol = 1;
opol.policy_refcnt--;
break;
default:
goto done;
break;
}
npol.policy_refcnt++;
}
} else npol.policy_refcnt++;
/* set us up to use the new policy */
adb.aux_attributes |= KADM5_POLICY;
if (adb.policy)
free(adb.policy);
adb.policy = strdup(entry->policy);
/* set pw_max_life based on new policy */
if (npol.pw_max_life) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb,
&(kdb->pw_expiration));
if (ret)
goto done;
kdb->pw_expiration += npol.pw_max_life;
} else {
kdb->pw_expiration = 0;
}
}
if ((mask & KADM5_POLICY_CLR) &&
(adb.aux_attributes & KADM5_POLICY)) {
ret = kadm5_get_policy(handle->lhandle, adb.policy, &opol);
switch(ret) {
case EINVAL:
case KADM5_BAD_POLICY:
case KADM5_UNK_POLICY:
ret = KADM5_BAD_DB;
goto done;
break;
case KADM5_OK:
have_opol = 1;
if (adb.policy)
free(adb.policy);
adb.policy = NULL;
adb.aux_attributes &= ~KADM5_POLICY;
kdb->pw_expiration = 0;
opol.policy_refcnt--;
break;
default:
goto done;
break;
}
}
if (((mask & KADM5_POLICY) || (mask & KADM5_POLICY_CLR)) &&
(((have_opol) &&
(ret =
kadm5_modify_policy_internal(handle->lhandle, &opol,
KADM5_REF_COUNT))) ||
((have_npol) &&
(ret =
kadm5_modify_policy_internal(handle->lhandle, &npol,
KADM5_REF_COUNT)))))
goto done;
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
if (mask & KADM5_PW_EXPIRATION)
kdb->pw_expiration = entry->pw_expiration;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
if((mask & KADM5_KVNO)) {
for (i = 0; i < kdb->n_key_data; i++)
kdb->key_data[i].key_data_kvno = entry->kvno;
}
if (mask & KADM5_TL_DATA) {
krb5_tl_data *tl;
/* may have to change the version number of the API. Updates the list with the given tl_data rather than overwriting */
for (tl = entry->tl_data; tl;
tl = tl->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl);
if( ret )
{
goto done;
}
}
}
/*
* Setting entry->fail_auth_count to 0 can be used to manually unlock
* an account. It is not possible to set fail_auth_count to any other
* value using kadmin.
*/
if (mask & KADM5_FAIL_AUTH_COUNT) {
if (entry->fail_auth_count != 0) {
ret = KADM5_BAD_SERVER_PARAMS;
goto done;
}
kdb->fail_auth_count = 0;
}
/* let the mask propagate to the database provider */
kdb->mask = mask;
ret = k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask);
if (ret)
goto done;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) goto done;
(void) k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask);
ret = KADM5_OK;
done:
if (have_opol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &opol);
ret = ret ? ret : ret2;
}
if (have_npol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &npol);
ret = ret ? ret : ret2;
}
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
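The patch attached to these kadm5 rows short-circuits check_1_6_dummy() when the caller supplied no password, so the function can no longer dereference a NULL pointer on old-style randkey requests. A minimal sketch of the guarded shape, assuming illustrative flag values and a hypothetical dummy-password comparison (the function body itself is not shown in these rows):

#include <stddef.h>
#include <string.h>

#define KADM5_ATTRIBUTES          0x000080   /* assumed values, for illustration only */
#define KRB5_KDB_DISALLOW_ALL_TIX 0x000040

static void
check_dummy_sketch(const char *password, long mask, long attributes)
{
    /* The fix: bail out first when there is no password to inspect. */
    if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
        !(attributes & KRB5_KDB_DISALLOW_ALL_TIX))
        return;
    /* Only reached with a non-NULL password, so strcmp() cannot crash. */
    if (strcmp(password, "HYPOTHETICAL-DUMMY") == 0) {
        /* ... treat the request as an old-style randkey ... */
    }
}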
17,183 | kadm5_purgekeys(void *server_handle,
krb5_principal principal,
int keepkvno)
{
kadm5_server_handle_t handle = server_handle;
kadm5_ret_t ret;
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_key_data *old_keydata;
int n_old_keydata;
int i, j, k;
CHECK_HANDLE(server_handle);
if (principal == NULL)
return EINVAL;
ret = kdb_get_entry(handle, principal, &kdb, &adb);
if (ret)
return(ret);
if (keepkvno <= 0) {
keepkvno = krb5_db_get_key_data_kvno(handle->context, kdb->n_key_data,
kdb->key_data);
}
old_keydata = kdb->key_data;
n_old_keydata = kdb->n_key_data;
kdb->n_key_data = 0;
kdb->key_data = krb5_db_alloc(handle->context, NULL,
n_old_keydata * sizeof(krb5_key_data));
if (kdb->key_data == NULL) {
ret = ENOMEM;
goto done;
}
memset(kdb->key_data, 0, n_old_keydata * sizeof(krb5_key_data));
for (i = 0, j = 0; i < n_old_keydata; i++) {
if (old_keydata[i].key_data_kvno < keepkvno)
continue;
/* Alias the key_data_contents pointers; we null them out in the
* source array immediately after. */
kdb->key_data[j] = old_keydata[i];
for (k = 0; k < old_keydata[i].key_data_ver; k++) {
old_keydata[i].key_data_contents[k] = NULL;
}
j++;
}
kdb->n_key_data = j;
cleanup_key_data(handle->context, n_old_keydata, old_keydata);
kdb->mask = KADM5_KEY_DATA;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret)
goto done;
done:
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| DoS | 0 | kadm5_purgekeys(void *server_handle,
krb5_principal principal,
int keepkvno)
{
kadm5_server_handle_t handle = server_handle;
kadm5_ret_t ret;
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_key_data *old_keydata;
int n_old_keydata;
int i, j, k;
CHECK_HANDLE(server_handle);
if (principal == NULL)
return EINVAL;
ret = kdb_get_entry(handle, principal, &kdb, &adb);
if (ret)
return(ret);
if (keepkvno <= 0) {
keepkvno = krb5_db_get_key_data_kvno(handle->context, kdb->n_key_data,
kdb->key_data);
}
old_keydata = kdb->key_data;
n_old_keydata = kdb->n_key_data;
kdb->n_key_data = 0;
kdb->key_data = krb5_db_alloc(handle->context, NULL,
n_old_keydata * sizeof(krb5_key_data));
if (kdb->key_data == NULL) {
ret = ENOMEM;
goto done;
}
memset(kdb->key_data, 0, n_old_keydata * sizeof(krb5_key_data));
for (i = 0, j = 0; i < n_old_keydata; i++) {
if (old_keydata[i].key_data_kvno < keepkvno)
continue;
/* Alias the key_data_contents pointers; we null them out in the
* source array immediately after. */
kdb->key_data[j] = old_keydata[i];
for (k = 0; k < old_keydata[i].key_data_ver; k++) {
old_keydata[i].key_data_contents[k] = NULL;
}
j++;
}
kdb->n_key_data = j;
cleanup_key_data(handle->context, n_old_keydata, old_keydata);
kdb->mask = KADM5_KEY_DATA;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret)
goto done;
done:
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
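kadm5_purgekeys() above keeps a key by shallow-copying its krb5_key_data struct and immediately NULLing the key_data_contents pointers in the source array, so the later cleanup_key_data() pass over the old array cannot free buffers the new array now owns. A generic sketch of that move-and-null pattern, using an illustrative element type rather than the kadm5 types:

#include <stddef.h>

struct blob { unsigned char *data; size_t len; };

/* Keep elements that satisfy keep(); transfer ownership by copying the
 * struct and clearing the source pointer so freeing the old array is safe. */
static size_t
move_kept(struct blob *dst, struct blob *src, size_t n,
          int (*keep)(const struct blob *))
{
    size_t i, j = 0;
    for (i = 0; i < n; i++) {
        if (!keep(&src[i]))
            continue;
        dst[j++] = src[i];      /* shallow copy: dst now owns data */
        src[i].data = NULL;     /* source no longer owns it */
        src[i].len = 0;
    }
    return j;                   /* number of elements moved */
}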
17,184 | kadm5_randkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock **keyblocks,
int *n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
int ret, last_pwd, have_pol = 0;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if (krb5_principal_compare(handle->context, principal, hist_princ)) {
/* If changing the history entry, the new entry must have exactly one
* key. */
if (keepold)
return KADM5_PROTECT_PRINCIPAL;
n_ks_tuple = 1;
}
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_crk(handle->context, act_mkey, ks_tuple, n_ks_tuple,
keepold, kdb);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
/* key data changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_FAIL_AUTH_COUNT;
/* | KADM5_RANDKEY_USED */;
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
n_ks_tuple, ks_tuple, NULL);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, n_ks_tuple, ks_tuple, NULL);
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| DoS | 0 | kadm5_randkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock **keyblocks,
int *n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
int ret, last_pwd, have_pol = 0;
kadm5_server_handle_t handle = server_handle;
krb5_keyblock *act_mkey;
if (keyblocks)
*keyblocks = NULL;
CHECK_HANDLE(server_handle);
/* Use default keysalts if caller did not provide any. */
if (n_ks_tuple == 0) {
ks_tuple = handle->params.keysalts;
n_ks_tuple = handle->params.num_keysalts;
}
krb5_clear_error_message(handle->context);
if (principal == NULL)
return EINVAL;
if (krb5_principal_compare(handle->context, principal, hist_princ)) {
/* If changing the history entry, the new entry must have exactly one
* key. */
if (keepold)
return KADM5_PROTECT_PRINCIPAL;
n_ks_tuple = 1;
}
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_crk(handle->context, act_mkey, ks_tuple, n_ks_tuple,
keepold, kdb);
if (ret)
goto done;
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd);
if (ret)
goto done;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if (keyblocks) {
ret = decrypt_key_data(handle->context,
kdb->n_key_data, kdb->key_data,
keyblocks, n_keys);
if (ret)
goto done;
}
/* key data changed, let the database provider know */
kdb->mask = KADM5_KEY_DATA | KADM5_FAIL_AUTH_COUNT;
/* | KADM5_RANDKEY_USED */;
ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold,
n_ks_tuple, ks_tuple, NULL);
if (ret)
goto done;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
(void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, principal,
keepold, n_ks_tuple, ks_tuple, NULL);
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
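In the randkey path above (and in the chpass/setkey paths that follow), a policy pw_max_life of zero means the password never expires, encoded as pw_expiration == 0; otherwise expiration is the current time plus the maximum life. The rule reduces to a one-liner (times in seconds; plain long used for illustration):

/* Sketch of the expiration rule shared by the kadm5 key-change paths. */
static long
new_pw_expiration(long now, long pw_max_life)
{
    return pw_max_life ? now + pw_max_life : 0;   /* 0 == never expires */
}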
17,185 | kadm5_set_use_password_server (void)
{
use_password_server = 1;
}
| DoS | 0 | kadm5_set_use_password_server (void)
{
use_password_server = 1;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,186 | kadm5_setkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock *keyblocks,
int n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
krb5_key_data *old_key_data;
int n_old_keys;
int i, j, k, kvno, ret, have_pol = 0;
#if 0
int last_pwd;
#endif
kadm5_server_handle_t handle = server_handle;
krb5_boolean similar;
krb5_keysalt keysalt;
krb5_key_data tmp_key_data;
krb5_key_data *tptr;
krb5_keyblock *act_mkey;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL || keyblocks == NULL)
return EINVAL;
if (hist_princ && /* this will be NULL when initializing the database */
((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE))
return KADM5_PROTECT_PRINCIPAL;
for (i = 0; i < n_keys; i++) {
for (j = i+1; j < n_keys; j++) {
if ((ret = krb5_c_enctype_compare(handle->context,
keyblocks[i].enctype,
keyblocks[j].enctype,
&similar)))
return(ret);
if (similar) {
if (n_ks_tuple) {
if (ks_tuple[i].ks_salttype == ks_tuple[j].ks_salttype)
return KADM5_SETKEY_DUP_ENCTYPES;
} else
return KADM5_SETKEY_DUP_ENCTYPES;
}
}
}
if (n_ks_tuple && n_ks_tuple != n_keys)
return KADM5_SETKEY3_ETYPE_MISMATCH;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
for (kvno = 0, i=0; i<kdb->n_key_data; i++)
if (kdb->key_data[i].key_data_kvno > kvno)
kvno = kdb->key_data[i].key_data_kvno;
if (keepold) {
old_key_data = kdb->key_data;
n_old_keys = kdb->n_key_data;
} else {
if (kdb->key_data != NULL)
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
n_old_keys = 0;
old_key_data = NULL;
}
kdb->key_data = (krb5_key_data*)krb5_db_alloc(handle->context, NULL, (n_keys+n_old_keys)
*sizeof(krb5_key_data));
if (kdb->key_data == NULL) {
ret = ENOMEM;
goto done;
}
memset(kdb->key_data, 0, (n_keys+n_old_keys)*sizeof(krb5_key_data));
kdb->n_key_data = 0;
for (i = 0; i < n_keys; i++) {
if (n_ks_tuple) {
keysalt.type = ks_tuple[i].ks_salttype;
keysalt.data.length = 0;
keysalt.data.data = NULL;
if (ks_tuple[i].ks_enctype != keyblocks[i].enctype) {
ret = KADM5_SETKEY3_ETYPE_MISMATCH;
goto done;
}
}
memset (&tmp_key_data, 0, sizeof(tmp_key_data));
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_encrypt_key_data(handle->context, act_mkey,
&keyblocks[i],
n_ks_tuple ? &keysalt : NULL, kvno + 1,
&tmp_key_data);
if (ret)
goto done;
tptr = &kdb->key_data[i];
tptr->key_data_ver = tmp_key_data.key_data_ver;
tptr->key_data_kvno = tmp_key_data.key_data_kvno;
for (k = 0; k < tmp_key_data.key_data_ver; k++) {
tptr->key_data_type[k] = tmp_key_data.key_data_type[k];
tptr->key_data_length[k] = tmp_key_data.key_data_length[k];
if (tmp_key_data.key_data_contents[k]) {
tptr->key_data_contents[k] = krb5_db_alloc(handle->context, NULL, tmp_key_data.key_data_length[k]);
if (tptr->key_data_contents[k] == NULL) {
int i1;
for (i1 = k; i1 < tmp_key_data.key_data_ver; i1++) {
if (tmp_key_data.key_data_contents[i1]) {
memset (tmp_key_data.key_data_contents[i1], 0, tmp_key_data.key_data_length[i1]);
free (tmp_key_data.key_data_contents[i1]);
}
}
ret = ENOMEM;
goto done;
}
memcpy (tptr->key_data_contents[k], tmp_key_data.key_data_contents[k], tmp_key_data.key_data_length[k]);
memset (tmp_key_data.key_data_contents[k], 0, tmp_key_data.key_data_length[k]);
free (tmp_key_data.key_data_contents[k]);
tmp_key_data.key_data_contents[k] = NULL;
}
}
kdb->n_key_data++;
}
/* copy old key data if necessary */
for (i = 0; i < n_old_keys; i++) {
kdb->key_data[i+n_keys] = old_key_data[i];
memset(&old_key_data[i], 0, sizeof (krb5_key_data));
kdb->n_key_data++;
}
if (old_key_data)
krb5_db_free(handle->context, old_key_data);
/* assert(kdb->n_key_data == n_keys + n_old_keys) */
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
if ((ret = krb5_timeofday(handle->context, &now)))
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if (ret = krb5_dbe_lookup_last_pwd_change(handle->context,
kdb, &last_pwd))
goto done;
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now)))
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| DoS | 0 | kadm5_setkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock *keyblocks,
int n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
krb5_key_data *old_key_data;
int n_old_keys;
int i, j, k, kvno, ret, have_pol = 0;
#if 0
int last_pwd;
#endif
kadm5_server_handle_t handle = server_handle;
krb5_boolean similar;
krb5_keysalt keysalt;
krb5_key_data tmp_key_data;
krb5_key_data *tptr;
krb5_keyblock *act_mkey;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL || keyblocks == NULL)
return EINVAL;
if (hist_princ && /* this will be NULL when initializing the database */
((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE))
return KADM5_PROTECT_PRINCIPAL;
for (i = 0; i < n_keys; i++) {
for (j = i+1; j < n_keys; j++) {
if ((ret = krb5_c_enctype_compare(handle->context,
keyblocks[i].enctype,
keyblocks[j].enctype,
&similar)))
return(ret);
if (similar) {
if (n_ks_tuple) {
if (ks_tuple[i].ks_salttype == ks_tuple[j].ks_salttype)
return KADM5_SETKEY_DUP_ENCTYPES;
} else
return KADM5_SETKEY_DUP_ENCTYPES;
}
}
}
if (n_ks_tuple && n_ks_tuple != n_keys)
return KADM5_SETKEY3_ETYPE_MISMATCH;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
for (kvno = 0, i=0; i<kdb->n_key_data; i++)
if (kdb->key_data[i].key_data_kvno > kvno)
kvno = kdb->key_data[i].key_data_kvno;
if (keepold) {
old_key_data = kdb->key_data;
n_old_keys = kdb->n_key_data;
} else {
if (kdb->key_data != NULL)
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
n_old_keys = 0;
old_key_data = NULL;
}
kdb->key_data = (krb5_key_data*)krb5_db_alloc(handle->context, NULL, (n_keys+n_old_keys)
*sizeof(krb5_key_data));
if (kdb->key_data == NULL) {
ret = ENOMEM;
goto done;
}
memset(kdb->key_data, 0, (n_keys+n_old_keys)*sizeof(krb5_key_data));
kdb->n_key_data = 0;
for (i = 0; i < n_keys; i++) {
if (n_ks_tuple) {
keysalt.type = ks_tuple[i].ks_salttype;
keysalt.data.length = 0;
keysalt.data.data = NULL;
if (ks_tuple[i].ks_enctype != keyblocks[i].enctype) {
ret = KADM5_SETKEY3_ETYPE_MISMATCH;
goto done;
}
}
memset (&tmp_key_data, 0, sizeof(tmp_key_data));
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_encrypt_key_data(handle->context, act_mkey,
&keyblocks[i],
n_ks_tuple ? &keysalt : NULL, kvno + 1,
&tmp_key_data);
if (ret)
goto done;
tptr = &kdb->key_data[i];
tptr->key_data_ver = tmp_key_data.key_data_ver;
tptr->key_data_kvno = tmp_key_data.key_data_kvno;
for (k = 0; k < tmp_key_data.key_data_ver; k++) {
tptr->key_data_type[k] = tmp_key_data.key_data_type[k];
tptr->key_data_length[k] = tmp_key_data.key_data_length[k];
if (tmp_key_data.key_data_contents[k]) {
tptr->key_data_contents[k] = krb5_db_alloc(handle->context, NULL, tmp_key_data.key_data_length[k]);
if (tptr->key_data_contents[k] == NULL) {
int i1;
for (i1 = k; i1 < tmp_key_data.key_data_ver; i1++) {
if (tmp_key_data.key_data_contents[i1]) {
memset (tmp_key_data.key_data_contents[i1], 0, tmp_key_data.key_data_length[i1]);
free (tmp_key_data.key_data_contents[i1]);
}
}
ret = ENOMEM;
goto done;
}
memcpy (tptr->key_data_contents[k], tmp_key_data.key_data_contents[k], tmp_key_data.key_data_length[k]);
memset (tmp_key_data.key_data_contents[k], 0, tmp_key_data.key_data_length[k]);
free (tmp_key_data.key_data_contents[k]);
tmp_key_data.key_data_contents[k] = NULL;
}
}
kdb->n_key_data++;
}
/* copy old key data if necessary */
for (i = 0; i < n_old_keys; i++) {
kdb->key_data[i+n_keys] = old_key_data[i];
memset(&old_key_data[i], 0, sizeof (krb5_key_data));
kdb->n_key_data++;
}
if (old_key_data)
krb5_db_free(handle->context, old_key_data);
/* assert(kdb->n_key_data == n_keys + n_old_keys) */
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
if ((ret = krb5_timeofday(handle->context, &now)))
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if (ret = krb5_dbe_lookup_last_pwd_change(handle->context,
kdb, &last_pwd))
goto done;
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now)))
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
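kadm5_setkey_principal_3() above rejects duplicate enctypes with a pairwise O(n^2) scan: two keyblocks conflict when krb5_c_enctype_compare() reports them similar, unless per-key salt types were supplied and differ. A self-contained sketch of that scan, with plain ints and a caller-supplied similarity callback standing in for the krb5 API:

#include <stddef.h>

/* Returns 1 when any pair of keys would be rejected as duplicates. */
static int
has_duplicate(const int *enctype, const int *salttype, size_t n,
              int (*similar)(int, int))
{
    size_t i, j;
    for (i = 0; i < n; i++)
        for (j = i + 1; j < n; j++)
            if (similar(enctype[i], enctype[j]) &&
                (salttype == NULL || salttype[i] == salttype[j]))
                return 1;   /* duplicate: reject, as in the code above */
    return 0;
}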
17,187 | kadm5_setv4key_principal(void *server_handle,
krb5_principal principal,
krb5_keyblock *keyblock)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
krb5_keysalt keysalt;
int i, k, kvno, ret, have_pol = 0;
#if 0
int last_pwd;
#endif
kadm5_server_handle_t handle = server_handle;
krb5_key_data tmp_key_data;
krb5_keyblock *act_mkey;
memset( &tmp_key_data, 0, sizeof(tmp_key_data));
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL || keyblock == NULL)
return EINVAL;
if (hist_princ && /* this will be NULL when initializing the database */
((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE))
return KADM5_PROTECT_PRINCIPAL;
if (keyblock->enctype != ENCTYPE_DES_CBC_CRC)
return KADM5_SETV4KEY_INVAL_ENCTYPE;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
for (kvno = 0, i=0; i<kdb->n_key_data; i++)
if (kdb->key_data[i].key_data_kvno > kvno)
kvno = kdb->key_data[i].key_data_kvno;
if (kdb->key_data != NULL)
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
kdb->key_data = (krb5_key_data*)krb5_db_alloc(handle->context, NULL, sizeof(krb5_key_data));
if (kdb->key_data == NULL)
return ENOMEM;
memset(kdb->key_data, 0, sizeof(krb5_key_data));
kdb->n_key_data = 1;
keysalt.type = KRB5_KDB_SALTTYPE_V4;
/* XXX data.magic? */
keysalt.data.length = 0;
keysalt.data.data = NULL;
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
/* use tmp_key_data as temporary location and reallocate later */
ret = krb5_dbe_encrypt_key_data(handle->context, act_mkey, keyblock,
&keysalt, kvno + 1, &tmp_key_data);
if (ret) {
goto done;
}
for (k = 0; k < tmp_key_data.key_data_ver; k++) {
kdb->key_data->key_data_type[k] = tmp_key_data.key_data_type[k];
kdb->key_data->key_data_length[k] = tmp_key_data.key_data_length[k];
if (tmp_key_data.key_data_contents[k]) {
kdb->key_data->key_data_contents[k] = krb5_db_alloc(handle->context, NULL, tmp_key_data.key_data_length[k]);
if (kdb->key_data->key_data_contents[k] == NULL) {
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
kdb->key_data = NULL;
kdb->n_key_data = 0;
ret = ENOMEM;
goto done;
}
memcpy (kdb->key_data->key_data_contents[k], tmp_key_data.key_data_contents[k], tmp_key_data.key_data_length[k]);
memset (tmp_key_data.key_data_contents[k], 0, tmp_key_data.key_data_length[k]);
free (tmp_key_data.key_data_contents[k]);
tmp_key_data.key_data_contents[k] = NULL;
}
}
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if (ret = krb5_dbe_lookup_last_pwd_change(handle->context,
kdb, &last_pwd))
goto done;
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
ret = KADM5_OK;
done:
for (i = 0; i < tmp_key_data.key_data_ver; i++) {
if (tmp_key_data.key_data_contents[i]) {
memset (tmp_key_data.key_data_contents[i], 0, tmp_key_data.key_data_length[i]);
free (tmp_key_data.key_data_contents[i]);
}
}
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| DoS | 0 | kadm5_setv4key_principal(void *server_handle,
krb5_principal principal,
krb5_keyblock *keyblock)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
krb5_keysalt keysalt;
int i, k, kvno, ret, have_pol = 0;
#if 0
int last_pwd;
#endif
kadm5_server_handle_t handle = server_handle;
krb5_key_data tmp_key_data;
krb5_keyblock *act_mkey;
memset( &tmp_key_data, 0, sizeof(tmp_key_data));
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL || keyblock == NULL)
return EINVAL;
if (hist_princ && /* this will be NULL when initializing the database */
((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE))
return KADM5_PROTECT_PRINCIPAL;
if (keyblock->enctype != ENCTYPE_DES_CBC_CRC)
return KADM5_SETV4KEY_INVAL_ENCTYPE;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
for (kvno = 0, i=0; i<kdb->n_key_data; i++)
if (kdb->key_data[i].key_data_kvno > kvno)
kvno = kdb->key_data[i].key_data_kvno;
if (kdb->key_data != NULL)
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
kdb->key_data = (krb5_key_data*)krb5_db_alloc(handle->context, NULL, sizeof(krb5_key_data));
if (kdb->key_data == NULL)
return ENOMEM;
memset(kdb->key_data, 0, sizeof(krb5_key_data));
kdb->n_key_data = 1;
keysalt.type = KRB5_KDB_SALTTYPE_V4;
/* XXX data.magic? */
keysalt.data.length = 0;
keysalt.data.data = NULL;
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
/* use tmp_key_data as temporary location and reallocate later */
ret = krb5_dbe_encrypt_key_data(handle->context, act_mkey, keyblock,
&keysalt, kvno + 1, &tmp_key_data);
if (ret) {
goto done;
}
for (k = 0; k < tmp_key_data.key_data_ver; k++) {
kdb->key_data->key_data_type[k] = tmp_key_data.key_data_type[k];
kdb->key_data->key_data_length[k] = tmp_key_data.key_data_length[k];
if (tmp_key_data.key_data_contents[k]) {
kdb->key_data->key_data_contents[k] = krb5_db_alloc(handle->context, NULL, tmp_key_data.key_data_length[k]);
if (kdb->key_data->key_data_contents[k] == NULL) {
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
kdb->key_data = NULL;
kdb->n_key_data = 0;
ret = ENOMEM;
goto done;
}
memcpy (kdb->key_data->key_data_contents[k], tmp_key_data.key_data_contents[k], tmp_key_data.key_data_length[k]);
memset (tmp_key_data.key_data_contents[k], 0, tmp_key_data.key_data_length[k]);
free (tmp_key_data.key_data_contents[k]);
tmp_key_data.key_data_contents[k] = NULL;
}
}
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
ret = krb5_timeofday(handle->context, &now);
if (ret)
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if (ret = krb5_dbe_lookup_last_pwd_change(handle->context,
kdb, &last_pwd))
goto done;
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now);
if (ret)
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
ret = KADM5_OK;
done:
for (i = 0; i < tmp_key_data.key_data_ver; i++) {
if (tmp_key_data.key_data_contents[i]) {
memset (tmp_key_data.key_data_contents[i], 0, tmp_key_data.key_data_length[i]);
free (tmp_key_data.key_data_contents[i]);
}
}
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
17,188 | kadm5_ret_t krb5_copy_key_data_contents(context, from, to)
krb5_context context;
krb5_key_data *from, *to;
{
int i, idx;
*to = *from;
idx = (from->key_data_ver == 1 ? 1 : 2);
for (i = 0; i < idx; i++) {
if ( from->key_data_length[i] ) {
to->key_data_contents[i] = malloc(from->key_data_length[i]);
if (to->key_data_contents[i] == NULL) {
for (i = 0; i < idx; i++) {
if (to->key_data_contents[i]) {
memset(to->key_data_contents[i], 0,
to->key_data_length[i]);
free(to->key_data_contents[i]);
}
}
return ENOMEM;
}
memcpy(to->key_data_contents[i], from->key_data_contents[i],
from->key_data_length[i]);
}
}
return 0;
}
| DoS | 0 | kadm5_ret_t krb5_copy_key_data_contents(context, from, to)
krb5_context context;
krb5_key_data *from, *to;
{
int i, idx;
*to = *from;
idx = (from->key_data_ver == 1 ? 1 : 2);
for (i = 0; i < idx; i++) {
if ( from->key_data_length[i] ) {
to->key_data_contents[i] = malloc(from->key_data_length[i]);
if (to->key_data_contents[i] == NULL) {
for (i = 0; i < idx; i++) {
if (to->key_data_contents[i]) {
memset(to->key_data_contents[i], 0,
to->key_data_length[i]);
free(to->key_data_contents[i]);
}
}
return ENOMEM;
}
memcpy(to->key_data_contents[i], from->key_data_contents[i],
from->key_data_length[i]);
}
}
return 0;
}
| @@ -186,7 +186,7 @@ check_1_6_dummy(kadm5_principal_ent_t entry, long mask,
char *password = *passptr;
/* Old-style randkey operations disallowed tickets to start. */
- if (!(mask & KADM5_ATTRIBUTES) ||
+ if (password == NULL || !(mask & KADM5_ATTRIBUTES) ||
!(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX))
return;
| null | null | null |
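One reading of krb5_copy_key_data_contents() above: *to = *from aliases the source's contents pointers into to, and the ENOMEM path then reuses the loop index i to free to->key_data_contents[...], which can touch buffers still owned by from. A deep-copy sketch that avoids both pitfalls by clearing the destination pointers up front and unwinding with a separate index (simplified two-slot type, not the kadm5 API):

#include <stdlib.h>
#include <string.h>

struct kd { unsigned char *contents[2]; size_t length[2]; };

static int
copy_kd(const struct kd *from, struct kd *to, int slots)
{
    int i, j;
    /* Clear first so the error path frees only what we allocated. */
    memset(to->contents, 0, sizeof(to->contents));
    for (i = 0; i < slots; i++) {
        to->length[i] = from->length[i];
        if (from->length[i] == 0)
            continue;
        to->contents[i] = malloc(from->length[i]);
        if (to->contents[i] == NULL) {
            for (j = 0; j < i; j++)   /* separate index: free only ours */
                free(to->contents[j]);
            return -1;                /* ENOMEM analog */
        }
        memcpy(to->contents[i], from->contents[i], from->length[i]);
    }
    return 0;
}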
17,189 | SYSCALL_DEFINE0(getpgrp)
{
return sys_getpgid(0);
}
| +Info | 0 | SYSCALL_DEFINE0(getpgrp)
{
return sys_getpgid(0);
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
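The CWE-16 patch repeated on these kernel rows fixes a kernel stack information leak: the old override_release() passed the caller's len straight to copy_to_user(), copying uninitialized bytes of the 65-byte stack buffer past the formatted string out to userspace. The fix zero-initializes the buffer and bounds the copy by what scnprintf() actually wrote, plus the terminator. A userspace analog of the fixed shape, assuming dst has room for len bytes:

#include <stdio.h>
#include <string.h>

static size_t
format_release(char *dst, size_t len, unsigned v, const char *rest)
{
    char buf[65] = { 0 };   /* zeroed: no stale bytes can leak */
    size_t cap = len < sizeof(buf) ? len : sizeof(buf);

    if (cap == 0)
        return 0;
    if (snprintf(buf, cap, "2.6.%u%s", v, rest) < 0)
        return 0;
    /* Copy only the bytes actually formatted, never the full buffer. */
    memcpy(dst, buf, strlen(buf) + 1);
    return strlen(buf) + 1;
}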
17,190 | SYSCALL_DEFINE0(setsid)
{
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
pid_t session = pid_vnr(sid);
int err = -EPERM;
write_lock_irq(&tasklist_lock);
/* Fail if I am already a session leader */
if (group_leader->signal->leader)
goto out;
/* Fail if a process group id already exists that equals the
* proposed session id.
*/
if (pid_task(sid, PIDTYPE_PGID))
goto out;
group_leader->signal->leader = 1;
__set_special_pids(sid);
proc_clear_tty(group_leader);
err = session;
out:
write_unlock_irq(&tasklist_lock);
if (err > 0) {
proc_sid_connector(group_leader);
sched_autogroup_create_attach(group_leader);
}
return err;
}
| +Info | 0 | SYSCALL_DEFINE0(setsid)
{
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
pid_t session = pid_vnr(sid);
int err = -EPERM;
write_lock_irq(&tasklist_lock);
/* Fail if I am already a session leader */
if (group_leader->signal->leader)
goto out;
/* Fail if a process group id already exists that equals the
* proposed session id.
*/
if (pid_task(sid, PIDTYPE_PGID))
goto out;
group_leader->signal->leader = 1;
__set_special_pids(sid);
proc_clear_tty(group_leader);
err = session;
out:
write_unlock_irq(&tasklist_lock);
if (err > 0) {
proc_sid_connector(group_leader);
sched_autogroup_create_attach(group_leader);
}
return err;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,191 | SYSCALL_DEFINE1(setgid, gid_t, gid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kgid_t kgid;
kgid = make_kgid(ns, gid);
if (!gid_valid(kgid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
new->egid = new->fsgid = kgid;
else
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
| +Info | 0 | SYSCALL_DEFINE1(setgid, gid_t, gid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kgid_t kgid;
kgid = make_kgid(ns, gid);
if (!gid_valid(kgid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
new->egid = new->fsgid = kgid;
else
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,192 | SYSCALL_DEFINE1(setuid, uid_t, uid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kuid_t kuid;
kuid = make_kuid(ns, uid);
if (!uid_valid(kuid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
goto error;
}
new->fsuid = new->euid = kuid;
retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
if (retval < 0)
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
| +Info | 0 | SYSCALL_DEFINE1(setuid, uid_t, uid)
{
struct user_namespace *ns = current_user_ns();
const struct cred *old;
struct cred *new;
int retval;
kuid_t kuid;
kuid = make_kuid(ns, uid);
if (!uid_valid(kuid))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
retval = -EPERM;
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
if (retval < 0)
goto error;
}
} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
goto error;
}
new->fsuid = new->euid = kuid;
retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
if (retval < 0)
goto error;
return commit_creds(new);
error:
abort_creds(new);
return retval;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,193 | SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
const struct cred *old;
struct cred *new;
uid_t old_fsuid;
kuid_t kuid;
old = current_cred();
old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
kuid = make_kuid(old->user_ns, uid);
if (!uid_valid(kuid))
return old_fsuid;
new = prepare_creds();
if (!new)
return old_fsuid;
if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
nsown_capable(CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
goto change_okay;
}
}
abort_creds(new);
return old_fsuid;
change_okay:
commit_creds(new);
return old_fsuid;
}
| +Info | 0 | SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
const struct cred *old;
struct cred *new;
uid_t old_fsuid;
kuid_t kuid;
old = current_cred();
old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
kuid = make_kuid(old->user_ns, uid);
if (!uid_valid(kuid))
return old_fsuid;
new = prepare_creds();
if (!new)
return old_fsuid;
if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
nsown_capable(CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
goto change_okay;
}
}
abort_creds(new);
return old_fsuid;
change_okay:
commit_creds(new);
return old_fsuid;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,194 | SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
const struct cred *old;
struct cred *new;
gid_t old_fsgid;
kgid_t kgid;
old = current_cred();
old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
kgid = make_kgid(old->user_ns, gid);
if (!gid_valid(kgid))
return old_fsgid;
new = prepare_creds();
if (!new)
return old_fsgid;
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
nsown_capable(CAP_SETGID)) {
if (!gid_eq(kgid, old->fsgid)) {
new->fsgid = kgid;
goto change_okay;
}
}
abort_creds(new);
return old_fsgid;
change_okay:
commit_creds(new);
return old_fsgid;
}
| +Info | 0 | SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
const struct cred *old;
struct cred *new;
gid_t old_fsgid;
kgid_t kgid;
old = current_cred();
old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
kgid = make_kgid(old->user_ns, gid);
if (!gid_valid(kgid))
return old_fsgid;
new = prepare_creds();
if (!new)
return old_fsgid;
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
nsown_capable(CAP_SETGID)) {
if (!gid_eq(kgid, old->fsgid)) {
new->fsgid = kgid;
goto change_okay;
}
}
abort_creds(new);
return old_fsgid;
change_okay:
commit_creds(new);
return old_fsgid;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,195 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
if (tbuf) {
struct tms tmp;
do_sys_times(&tmp);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
force_successful_syscall_return();
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
| +Info | 0 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
if (tbuf) {
struct tms tmp;
do_sys_times(&tmp);
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
return -EFAULT;
}
force_successful_syscall_return();
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,196 | SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
struct task_struct *p;
struct pid *grp;
int retval;
rcu_read_lock();
if (!pid)
grp = task_pgrp(current);
else {
retval = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
grp = task_pgrp(p);
if (!grp)
goto out;
retval = security_task_getpgid(p);
if (retval)
goto out;
}
retval = pid_vnr(grp);
out:
rcu_read_unlock();
return retval;
}
| +Info | 0 | SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
struct task_struct *p;
struct pid *grp;
int retval;
rcu_read_lock();
if (!pid)
grp = task_pgrp(current);
else {
retval = -ESRCH;
p = find_task_by_vpid(pid);
if (!p)
goto out;
grp = task_pgrp(p);
if (!grp)
goto out;
retval = security_task_getpgid(p);
if (retval)
goto out;
}
retval = pid_vnr(grp);
out:
rcu_read_unlock();
return retval;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,197 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
int errno = 0;
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof *name))
errno = -EFAULT;
up_read(&uts_sem);
if (!errno && override_release(name->release, sizeof(name->release)))
errno = -EFAULT;
if (!errno && override_architecture(name))
errno = -EFAULT;
return errno;
}
| +Info | 0 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
int errno = 0;
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof *name))
errno = -EFAULT;
up_read(&uts_sem);
if (!errno && override_release(name->release, sizeof(name->release)))
errno = -EFAULT;
if (!errno && override_architecture(name))
errno = -EFAULT;
return errno;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
17,198 | SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
int error;
if (!name)
return -EFAULT;
if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
return -EFAULT;
down_read(&uts_sem);
error = __copy_to_user(&name->sysname, &utsname()->sysname,
__OLD_UTS_LEN);
error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
error |= __copy_to_user(&name->nodename, &utsname()->nodename,
__OLD_UTS_LEN);
error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
error |= __copy_to_user(&name->release, &utsname()->release,
__OLD_UTS_LEN);
error |= __put_user(0, name->release + __OLD_UTS_LEN);
error |= __copy_to_user(&name->version, &utsname()->version,
__OLD_UTS_LEN);
error |= __put_user(0, name->version + __OLD_UTS_LEN);
error |= __copy_to_user(&name->machine, &utsname()->machine,
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
if (!error && override_architecture(name))
error = -EFAULT;
if (!error && override_release(name->release, sizeof(name->release)))
error = -EFAULT;
return error ? -EFAULT : 0;
}
| +Info | 0 | SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
int error;
if (!name)
return -EFAULT;
if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
return -EFAULT;
down_read(&uts_sem);
error = __copy_to_user(&name->sysname, &utsname()->sysname,
__OLD_UTS_LEN);
error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
error |= __copy_to_user(&name->nodename, &utsname()->nodename,
__OLD_UTS_LEN);
error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
error |= __copy_to_user(&name->release, &utsname()->release,
__OLD_UTS_LEN);
error |= __put_user(0, name->release + __OLD_UTS_LEN);
error |= __copy_to_user(&name->version, &utsname()->version,
__OLD_UTS_LEN);
error |= __put_user(0, name->version + __OLD_UTS_LEN);
error |= __copy_to_user(&name->machine, &utsname()->machine,
__OLD_UTS_LEN);
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
up_read(&uts_sem);
if (!error && override_architecture(name))
error = -EFAULT;
if (!error && override_release(name->release, sizeof(name->release)))
error = -EFAULT;
return error ? -EFAULT : 0;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |
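olduname above copies each utsname field with a fixed __OLD_UTS_LEN window and then forces a terminating NUL via __put_user, so the userspace struct always holds proper strings even when the source is longer. A userspace analog of that copy-then-terminate idiom (width of 8 assumed to mirror __OLD_UTS_LEN; src must have at least that many readable bytes):

#include <string.h>

#define OLD_UTS_LEN 8   /* assumed to mirror the kernel's __OLD_UTS_LEN */

static void
copy_old_field(char dst[OLD_UTS_LEN + 1], const char *src)
{
    memcpy(dst, src, OLD_UTS_LEN);   /* fixed window, may truncate */
    dst[OLD_UTS_LEN] = '\0';         /* always NUL-terminate */
}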
17,199 | SYSCALL_DEFINE1(umask, int, mask)
{
mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
return mask;
}
| +Info | 0 | SYSCALL_DEFINE1(umask, int, mask)
{
mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
return mask;
}
| @@ -1265,15 +1265,16 @@ DECLARE_RWSEM(uts_sem);
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
*/
-static int override_release(char __user *release, int len)
+static int override_release(char __user *release, size_t len)
{
int ret = 0;
- char buf[65];
if (current->personality & UNAME26) {
- char *rest = UTS_RELEASE;
+ const char *rest = UTS_RELEASE;
+ char buf[65] = { 0 };
int ndots = 0;
unsigned v;
+ size_t copy;
while (*rest) {
if (*rest == '.' && ++ndots >= 3)
@@ -1283,8 +1284,9 @@ static int override_release(char __user *release, int len)
rest++;
}
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
- snprintf(buf, len, "2.6.%u%s", v, rest);
- ret = copy_to_user(release, buf, len);
+ copy = min(sizeof(buf), max_t(size_t, 1, len));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
}
return ret;
} | CWE-16 | null | null |